fix spelling across the board · NVIDIA/aistore@2e38d81 · GitHub

Commit 2e38d81

fix spelling across the board
Signed-off-by: Alex Aizman <alex.aizman@gmail.com>
1 parent dd82103 commit 2e38d81


82 files changed: +105 -105 lines

ais/backend/oci.go

Lines changed: 1 addition & 1 deletion
@@ -144,7 +144,7 @@ func NewOCI(t core.TargetPut, tstats stats.Tracker, startingUp bool) (core.Backe
 // OCI_CLI_RC_FILE ENV that defaults to ~/.oci/oci_cli_rc), it (like the configFile)
 // is in so-called INI file format. A default for the Profile is found at
 // [OCI_CLI_SETTINGS]default_profile. That Profile (defaults to "DEFAULT") would still
-// be overidden by the presence of a (non-empty) OCI_CLI_PROFILE ENV. The selected
+// be overridden by the presence of a (non-empty) OCI_CLI_PROFILE ENV. The selected
 // or defaulted Profile would then define a section in the rcFile (as it does in the
 // configFile) with a key named "compartment-id". That value of that key would be used
 // if the OCI_COMPARTMENT_OCID ENV is not set or empty. Parsing the rcFile is apparently
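To make the resolution order in this comment concrete, here is a minimal, illustrative Go sketch (an assumption for illustration, not AIStore's actual backend code). The `iniLookup` helper is a deliberately naive hypothetical INI reader:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// iniLookup is a hypothetical, naive helper: value of key in [section].
func iniLookup(path, section, key string) (string, bool) {
	f, err := os.Open(path)
	if err != nil {
		return "", false
	}
	defer f.Close()
	var inSection bool
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
			inSection = line == "["+section+"]"
			continue
		}
		if k, v, ok := strings.Cut(line, "="); ok && inSection && strings.TrimSpace(k) == key {
			return strings.TrimSpace(v), true
		}
	}
	return "", false
}

func resolveCompartmentOCID(rcFile string) (string, error) {
	// 1. a set, non-empty ENV wins outright
	if ocid := os.Getenv("OCI_COMPARTMENT_OCID"); ocid != "" {
		return ocid, nil
	}
	// 2. pick the Profile: a non-empty OCI_CLI_PROFILE ENV overrides
	//    [OCI_CLI_SETTINGS]default_profile, which defaults to "DEFAULT"
	profile := "DEFAULT"
	if v, ok := iniLookup(rcFile, "OCI_CLI_SETTINGS", "default_profile"); ok {
		profile = v
	}
	if v := os.Getenv("OCI_CLI_PROFILE"); v != "" {
		profile = v
	}
	// 3. the selected Profile's section may define "compartment-id"
	if v, ok := iniLookup(rcFile, profile, "compartment-id"); ok {
		return v, nil
	}
	return "", fmt.Errorf("compartment OCID not found in ENV or %s", rcFile)
}

func main() {
	ocid, err := resolveCompartmentOCID(os.ExpandEnv("$HOME/.oci/oci_cli_rc"))
	fmt.Println(ocid, err)
}
```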

ais/htrun.go

Lines changed: 1 addition & 1 deletion
@@ -741,7 +741,7 @@ func _doResp(args *callArgs, req *http.Request, resp *http.Response, res *callRe
 	}

 	// read and decode via call-result-value (`cresv`), if provided;
-	// othwerwise, read and return bytes for the caller to unmarshal
+	// otherwise, read and return bytes for the caller to unmarshal
 	if args.cresv != nil {
 		res.v = args.cresv.newV()
 		args.cresv.read(res, resp.Body)
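The two branches this comment describes follow a common decode-or-raw-bytes pattern. A minimal sketch, with a hypothetical `resultValue` interface standing in for AIStore's `cresv` (an illustration of the pattern, not the actual `_doResp` code):

```go
package sketch

import "io"

// resultValue is a hypothetical stand-in for the call-result-value (`cresv`).
type resultValue interface {
	newV() any                        // allocate the typed result
	read(v any, body io.Reader) error // decode the response body into it
}

// readResp decodes into a typed value when one is provided; otherwise it
// returns the raw bytes for the caller to unmarshal later.
func readResp(body io.Reader, cresv resultValue) (v any, raw []byte, err error) {
	if cresv != nil {
		v = cresv.newV()
		err = cresv.read(v, body)
		return v, nil, err
	}
	raw, err = io.ReadAll(body)
	return nil, raw, err
}
```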

ais/prxclu.go

Lines changed: 1 addition & 1 deletion
@@ -2010,7 +2010,7 @@ func (p *proxy) httpcludel(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	// primary (and cluster) to start and finalize rebalancing status _prior_ to removing invidual nodes
+	// primary (and cluster) to start and finalize rebalancing status _prior_ to removing individual nodes
 	if err := p.pready(smap, true); err != nil {
 		p.writeErr(w, r, err, http.StatusServiceUnavailable)
 		return

ais/psetforce.go

Lines changed: 1 addition & 1 deletion
@@ -736,7 +736,7 @@ func (h *htrun) _commitForceJoin(w http.ResponseWriter, r *http.Request, msg *ac
 		return
 	}

-	// update cluMeta in mem (= desination, brute force)
+	// update cluMeta in mem (= destination, brute force)
 	nconfig := &ncm.Config.ClusterConfig
 	if err := cmn.GCO.Update(nconfig); err != nil {
 		err = fmt.Errorf("%s failed to update config %s: %v", tag, nconfig.String(), err)

ais/test/archive_test.go

Lines changed: 1 addition & 1 deletion
@@ -191,7 +191,7 @@ func TestGetFromArch(t *testing.T) {
 	})
 }

-// archive multple obj-s with an option to append if exists
+// archive multiple obj-s with an option to append if exists
 func TestArchMultiObj(t *testing.T) {
 	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
 	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {

ais/test/bucket_test.go

Lines changed: 4 additions & 4 deletions
@@ -2760,7 +2760,7 @@ func testCopyBucketPrepend(t *testing.T, srcBck cmn.Bck, m *ioContext) {
 		tools.DestroyBucket(t, proxyURL, dstBck)
 	})

-	tlog.Logf("Wating for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
+	tlog.Logf("Waiting for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
 	args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute}
 	_, err = api.WaitForXactionIC(baseParams, &args)
 	tassert.CheckFatal(t, err)
@@ -2785,7 +2785,7 @@ func testCopyBucketPrefix(t *testing.T, srcBck cmn.Bck, m *ioContext, expected i
 		tools.DestroyBucket(t, proxyURL, dstBck)
 	})

-	tlog.Logf("Wating for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
+	tlog.Logf("Waiting for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
 	args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute}
 	_, err = api.WaitForXactionIC(baseParams, &args)
 	tassert.CheckFatal(t, err)
@@ -2857,7 +2857,7 @@ func testCopyBucketDryRun(t *testing.T, srcBck cmn.Bck, m *ioContext) {
 		tools.DestroyBucket(t, proxyURL, dstBck)
 	})

-	tlog.Logf("Wating for x-%s[%s]\n", apc.ActCopyBck, xid)
+	tlog.Logf("Waiting for x-%s[%s]\n", apc.ActCopyBck, xid)
 	args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute}
 	_, err = api.WaitForXactionIC(baseParams, &args)
 	tassert.CheckFatal(t, err)
@@ -2901,7 +2901,7 @@ func testCopyBucketMultiWorker(t *testing.T, srcBck cmn.Bck, m *ioContext) {
 		tools.DestroyBucket(t, proxyURL, dstBck)
 	})

-	tlog.Logf("Wating for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
+	tlog.Logf("Waiting for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck.String(), dstBck.String())
 	args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute}
 	_, err = api.WaitForXactionIC(baseParams, &args)
 	tassert.CheckFatal(t, err)

ais/test/config_test.go

Lines changed: 1 addition & 1 deletion
@@ -273,7 +273,7 @@ func TestConfigSyncToNewNode(t *testing.T) {
 	_, err = tools.WaitForClusterState(proxyURL, "proxy restored", smap.Version, origProxyCnt, origTargetCnt)
 	tassert.CheckFatal(t, err)

-	// 4. Ensure the proxy has lastest updated config
+	// 4. Ensure the proxy has latest updated config
 	daemonConfig := tools.GetDaemonConfig(t, proxy)
 	tassert.Fatalf(t, daemonConfig.EC.Enabled == newECEnabled,
 		"expected 'ec.Enabled' to be %v, got: %v", newECEnabled, daemonConfig.EC.Enabled)

ais/test/downloader_test.go

Lines changed: 1 addition & 1 deletion
@@ -1198,7 +1198,7 @@ func TestDownloadSync(t *testing.T) {
 	dlBody.Sync = false
 	downloadObjectRemote(t, dlBody, m.num, m.num)

-	tlog.Logln("5. overridding the objects and deleting some of them...")
+	tlog.Logln("5. overriding the objects and deleting some of them...")
 	m.remotePuts(false /*evict*/, true /*override*/)
 	m.del(objsToDelete)

ais/test/ec_test.go

Lines changed: 1 addition & 1 deletion
@@ -605,7 +605,7 @@ func TestECChange(t *testing.T) {
 	bucketProps.EC.Enabled = apc.Ptr(true)
 	bucketProps.EC.ObjSizeLimit = apc.Ptr[int64](300000)
 	_, err = api.SetBucketProps(baseParams, bck, bucketProps)
-	tassert.Errorf(t, err != nil, "Modifiying EC properties must fail")
+	tassert.Errorf(t, err != nil, "Modifying EC properties must fail")

 	tlog.Logln("Resetting bucket properties")
 	_, err = api.ResetBucketProps(baseParams, bck)

ais/test/etl_test.go

Lines changed: 1 addition & 1 deletion
@@ -1192,7 +1192,7 @@ def transform(input_bytes):
 	msg := etl.InitCodeMsg{
 		InitMsgBase: etl.InitMsgBase{
 			EtlName: test.etlName,
-			CommTypeX: etl.Hpush, // TODO: enalbe runtime error retrieval for hpull in inline transform calls
+			CommTypeX: etl.Hpush, // TODO: enable runtime error retrieval for hpull in inline transform calls
 			Timeout: etlBucketTimeout,
 		},
 		Code: []byte(strings.Replace(failureTransformFunc, "<EXIT_CODE>", exitCode, 1)),

ais/test/namespace_test.go

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ func listAllBuckets(t *testing.T, baseParams api.BaseParams, includeRemote bool,
 		fltPresence)
 	tassert.CheckFatal(t, err)

-	// TODO -- FIXME: do intead smth like: `remoteClusterBuckets.Equal(allRemaisBuckets)`
+	// TODO -- FIXME: do instead smth like: `remoteClusterBuckets.Equal(allRemaisBuckets)`
 	tassert.Errorf(
 		t, len(remoteClusterBuckets) == len(allRemaisBuckets),
 		"specific namespace %q => %v, while all-remote %q => %v, where presence=%d\n",

ais/test/object_test.go

Lines changed: 1 addition & 1 deletion
@@ -1473,7 +1473,7 @@ func Test_checksum(t *testing.T) {

 	m.remotePuts(true /*evict*/)

-	// Disable checkum.
+	// Disable checksum.
 	if p.Cksum.Type != cos.ChecksumNone {
 		propsToSet := &cmn.BpropsToSet{
 			Cksum: &cmn.CksumConfToSet{

ais/test/objprops_test.go

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ func propsReadObjects(t *testing.T, proxyURL string, bck cmn.Bck, lst map[string
 		tlog.Logf("Versions changed: %d (%s)\n", versChangedFinal-versChanged, cos.ToSizeIEC(bytesChangedFinal-bytesChanged, 1))
 	}
 	if versChanged != versChangedFinal || bytesChanged != bytesChangedFinal {
-		t.Fatalf("All objects must be retreived from the cache but cold get happened: %d times (%d bytes)",
+		t.Fatalf("All objects must be retrieved from the cache but cold get happened: %d times (%d bytes)",
 			versChangedFinal-versChanged, bytesChangedFinal-bytesChanged)
 	}
 }

ais/test/oci/mp_tuning.env

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ export OCI_MULTI_PART_DOWNLOAD_MAX_THREADS="4"
 # or equal to OCI_MULTI_PART_UPLOAD_THRESHOLD, we will simply perform a simple PUT upload.
 # Otherwise, we will shift to MultiPartUpload ("MPD") mode and PUT segments of (up to)
 # OCI_MAX_UPLOAD_SEGMENT_SIZE using (up to) OCI_MULTI_PART_UPLOAD_MAX_THREADS at a time
-# until enought threads have covered the total object size.
+# until enough threads have covered the total object size.

 export OCI_MAX_UPLOAD_SEGMENT_SIZE="100000"
 export OCI_MULTI_PART_UPLOAD_THRESHOLD="200000"
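As a sanity check of these knobs, the upload-mode decision the comments describe can be sketched in a few lines of Go (an assumption about intent, not the OCI backend's actual code): at or below the threshold the backend does a simple PUT; above it, multipart upload with ceil(objSize/segmentSize) segments, PUT up to OCI_MULTI_PART_UPLOAD_MAX_THREADS at a time.

```go
package main

import "fmt"

// uploadMode: illustrative sketch of the threshold decision described above.
func uploadMode(objSize, threshold, segmentSize int64) (multipart bool, segments int64) {
	if objSize <= threshold {
		return false, 1 // simple PUT
	}
	return true, (objSize + segmentSize - 1) / segmentSize // ceiling division
}

func main() {
	// with the values from this file: 500000 > 200000 => MPU with 5 segments
	mp, n := uploadMode(500000, 200000, 100000)
	fmt.Println(mp, n) // true 5
}
```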

ais/test/promote_test.go

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@ func (test *prmTests) do(t *testing.T, bck *meta.Bck) {
 	cnt, cntsub := countFiles(t, tempdir)
 	if !test.deleteSrc {
 		tassert.Errorf(t, cnt == test.num && cntsub == test.num,
-			"delete-src == false: expected cnt (%d) == cntsub (%d) == num (%d) gererated",
+			"delete-src == false: expected cnt (%d) == cntsub (%d) == num (%d) generated",
 			cnt, cntsub, test.num)
 	}
ais/tgtobj.go

Lines changed: 2 additions & 2 deletions
@@ -1634,7 +1634,7 @@ func (coi *coi) _dryRun(lom *core.LOM, objnameTo string) (res xs.CoiRes) {
 // PUT lom => dst
 // NOTE: no assumpions are being made on whether the source lom is present in cluster.
 // (can be a "pure" metadata of a (non-existing) Cloud object; accordingly, GetROC must
-// be able to hande cold get, warm get, etc.)
+// be able to handle cold get, warm get, etc.)
 //
 // If destination bucket is remote:
 // - create a local replica of the object on one of the targets, and
@@ -1988,7 +1988,7 @@ func (a *putA2I) finalize(size int64, cksum *cos.Cksum, fqn string) error {
 }

 //
-// put mirorr (main)
+// put mirror (main)
 //

 func (t *target) putMirror(lom *core.LOM) {

api/apc/lsmsg.go

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ const (
 	// expand archives as directories
 	LsArchDir

-	// return only object names and, spearately, statuses
+	// return only object names and, separately, statuses
 	LsNameOnly

 	// same as above and size (minor speedup)

api/apc/multiobj.go

Lines changed: 1 addition & 1 deletion
@@ -100,7 +100,7 @@ func (*PrefetchMsg) delim(sb *strings.Builder) {
 }

 // ArchiveMsg contains the parameters (all except the destination bucket)
-// for archiving mutiple objects as one of the supported archive.FileExtensions types
+// for archiving multiple objects as one of the supported archive.FileExtensions types
 // at the specified (bucket) destination.
 // See also: api.PutApndArchArgs
 // -------------------- terminology ---------------------

api/apc/query.go

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ const (

 // QparamFltPresence enum.
 //
-// Descibes both buckets and objects with respect to their existence/presence (or non-existence/non-presence)
+// Describes both buckets and objects with respect to their existence/presence (or non-existence/non-presence)
 // in AIS cluster.
 //
 // "FltPresent*" refers to availability ("presence") in the cluster. For details, see the values and comments below.

bench/micro/hashspeed/hash_test.go

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// Package hashspeed is a benchmark througput benchmark
+// Package hashspeed is a benchmark throughput benchmark
 /*
  * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
  */

bench/micro/map/main.go

Lines changed: 1 addition & 1 deletion
@@ -156,7 +156,7 @@ func parseCmdLine() (p params, err error) {
 	}

 	os.Args = []string{os.Args[0]}
-	flag.Parse() // Called so that imported packages don't compain
+	flag.Parse() // Called so that imported packages don't complain

 	if p.minSize > p.maxSize {
 		return params{}, fmt.Errorf("minsize %d greater than maxsize %d", p.minSize, p.maxSize)

bench/tools/aisloader/print.go

Lines changed: 1 addition & 1 deletion
@@ -302,7 +302,7 @@ func writeHumanReadibleFinalStats(to io.Writer, t *sts) {
 	}
 }

-// writeStatus writes stats to the writter.
+// writeStatus writes stats to the specified io.Writer.
 // if final = true, writes the total; otherwise writes the interval stats
 func writeStats(to io.Writer, jsonFormat, final bool, s, t *sts) {
 	if final {

cmd/authn/utils.go

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ func mergeBckACLs(toACLs, fromACLs bckACLList, cluIDFlt string) []*authn.BckACL
 }

 // mergeClusterACLs appends cluster ACLs from fromACLs which are not in toACL.
-// If a cluster ACL is already in the list, its persmissions are updated.
+// If a cluster ACL is already in the list, its permissions are updated.
 // If cluIDFlt is set, only ACLs for cluster with this ID are appended.
 func mergeClusterACLs(toACLs, fromACLs cluACLList, cluIDFlt string) []*authn.CluACL {
 	for _, n := range fromACLs {
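The merge semantics in this comment can be illustrated with a short sketch (simplified assumption: a `CluACL` reduced to an ID plus an access mask; the real `authn` types carry more fields):

```go
package sketch

// CluACL is a simplified, hypothetical stand-in for authn.CluACL.
type CluACL struct {
	ID     string
	Access uint64
}

// mergeClusterACLs: update permissions for ACLs already present,
// append the rest; when cluIDFlt is set, consider only that cluster.
func mergeClusterACLs(toACLs, fromACLs []*CluACL, cluIDFlt string) []*CluACL {
	for _, n := range fromACLs {
		if cluIDFlt != "" && n.ID != cluIDFlt {
			continue // filter: only ACLs for this cluster ID
		}
		updated := false
		for _, o := range toACLs {
			if o.ID == n.ID {
				o.Access = n.Access // already in the list: update permissions
				updated = true
				break
			}
		}
		if !updated {
			toACLs = append(toACLs, n)
		}
	}
	return toACLs
}
```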

cmd/cli/cli/bucket_hdlr.go

Lines changed: 1 addition & 1 deletion
@@ -620,7 +620,7 @@ proceed:

 	case objName != "":
 		// (1) list archive, or
-		// (2) show (as in: HEAD) specied object, or
+		// (2) show (as in: HEAD) specified object, or
 		// (3) show part of a bucket that matches prefix = objName, or
 		// (4) summarize part of a bucket that --/--
 		if flagIsSet(c, listArchFlag) {

cmd/cli/cli/const.go

Lines changed: 2 additions & 2 deletions
@@ -796,7 +796,7 @@ var (
 		Name: "template",
 		Usage: "Template to match object or file names; may contain prefix (that could be empty) with zero or more ranges\n" +
 			"\t(with optional steps and gaps), e.g.:\n" +
-			indent4 + "\t--template \"\" # (an empty or '*' template matches eveything)\n" +
+			indent4 + "\t--template \"\" # (an empty or '*' template matches everything)\n" +
 			indent4 + "\t--template 'dir/subdir/'\n" +
 			indent4 + "\t--template 'shard-{1000..9999}.tar'\n" +
 			indent4 + "\t--template \"prefix-{0010..0013..2}-gap-{1..2}-suffix\"\n" +
@@ -1138,7 +1138,7 @@ var (
 		indent4 + "\t - 'hpull' or 'hpull://' - same, but ETL container is expected to provide HTTP GET endpoint\n" +
 		indent4 + "\t - 'io' or 'io://' - for each request an aistore node will: run ETL container locally, write data\n" +
 		indent4 + "\t   to its standard input and then read transformed data from the standard output\n" +
-		indent4 + "\t   For more defails, see https://aiatscale.org/docs/etl#communication-mechanisms\n",
+		indent4 + "\t   For more details, see https://github.com/NVIDIA/aistore/blob/main/docs/etl.md#communication-mechanisms",
 	}

 	funcTransformFlag = cli.StringFlag{
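The `--template` flag in the first hunk above uses bash-style brace ranges with optional steps. A self-contained sketch of how a single such range expands (a simplified assumption for illustration; AIStore's actual parser, `cos.ParseBashTemplate`, also handles multiple ranges and gaps):

```go
package main

import "fmt"

// expand generates names for one zero-padded range with a step,
// e.g. the range in "prefix-{0010..0013..2}-suffix".
func expand(prefix string, lo, hi, step, width int, suffix string) []string {
	var names []string
	for i := lo; i <= hi; i += step {
		names = append(names, fmt.Sprintf("%s%0*d%s", prefix, width, i, suffix))
	}
	return names
}

func main() {
	// "prefix-{0010..0013..2}-suffix" => [prefix-0010-suffix prefix-0012-suffix]
	fmt.Println(expand("prefix-", 10, 13, 2, 4, "-suffix"))
}
```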

cmd/cli/cli/x509.go

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ var (
 	}
 	validateTLS = cli.Command{
 		Name: cmdValidateTLS,
-		Usage: "Check that all TLS certficates are identical",
+		Usage: "Check that all TLS certificates are identical",
 		ArgsUsage: optionalNodeIDArgument,
 		Action: validateCertHandler,
 	}

cmd/cli/cli/yap.go

Lines changed: 1 addition & 1 deletion
@@ -157,7 +157,7 @@ func (a *putargs) parse(c *cli.Context, emptyDstOnameOK bool) (err error) {
 		return err
 	}

-	// best-effort parsing: (inline range) | (local file or directrory)
+	// best-effort parsing: (inline range) | (local file or directory)

 	// inline "range" w/ no flag, e.g.: "/tmp/www/test{0..2}{0..2}.txt" ais://nnn/www
 	pt, e1 := cos.ParseBashTemplate(a.src.abspath)

cmn/api.go

Lines changed: 1 addition & 1 deletion
@@ -385,7 +385,7 @@ func (s AllBsummResults) Finalize(dsize map[string]uint64, testingEnv bool) {
 //

 type (
-	// ArchiveBckMsg contains parameters to archive mutiple objects from the specified (source) bucket.
+	// ArchiveBckMsg contains parameters to archive multiple objects from the specified (source) bucket.
 	// Destination bucket may the same as the source or a different one.
 	// -------------------- NOTE on terminology: ---------------------
 	// "archive" is any (.tar, .tgz/.tar.gz, .zip, .tar.lz4) formatted object often also called "shard"

cmn/cos/sync.go

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ type (
 		s chan struct{}
 	}

-	// DynSemaphore implements sempahore which can change its size during usage.
+	// DynSemaphore implements semaphore which can change its size.
 	DynSemaphore struct {
 		c    *sync.Cond
 		size int
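The comment fixed here names a useful primitive: a counting semaphore whose capacity can change while goroutines hold it. A minimal sketch of the technique using `sync.Cond` (an assumption for illustration, not the actual `cos.DynSemaphore` implementation, whose fields are only partially shown above):

```go
package main

import "sync"

// DynSem is a counting semaphore with a resizable capacity.
type DynSem struct {
	c    *sync.Cond
	size int // current capacity
	used int // tokens currently held
}

func NewDynSem(n int) *DynSem {
	return &DynSem{c: sync.NewCond(&sync.Mutex{}), size: n}
}

func (s *DynSem) Acquire() {
	s.c.L.Lock()
	for s.used >= s.size { // re-check after every wakeup
		s.c.Wait()
	}
	s.used++
	s.c.L.Unlock()
}

func (s *DynSem) Release() {
	s.c.L.Lock()
	s.used--
	s.c.Signal() // wake one waiter, if any
	s.c.L.Unlock()
}

// SetSize changes capacity; Broadcast makes all waiters re-check.
func (s *DynSem) SetSize(n int) {
	s.c.L.Lock()
	s.size = n
	s.c.Broadcast()
	s.c.L.Unlock()
}
```

Note the design choice: shrinking takes effect lazily, in that current holders simply finish while new acquirers block until `used` drops below the new size.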

cmn/cos/uuid.go

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ func IsAlphaNice(s string) bool {
 	return true
 }

-// alpha-numeric++ including letters, numbers, dashes (-), and underscores (_)
+// alphanumeric++ including letters, numbers, dashes (-), and underscores (_)
 // period (.) is allowed except for '..' (OnlyPlus const)
 func CheckAlphaPlus(s, tag string) error {
 	l := len(s)
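The rule in the fixed comment is easy to pin down in code. A simplified sketch (assumption: this mirrors only the stated rule, not `cos.CheckAlphaPlus` itself or its `tag`/error conventions):

```go
package sketch

import "fmt"

// checkAlphaPlus allows letters, digits, '-', '_', and '.',
// rejecting consecutive periods ("..").
func checkAlphaPlus(s string) error {
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z', c >= '0' && c <= '9':
		case c == '-', c == '_':
		case c == '.':
			if i > 0 && s[i-1] == '.' {
				return fmt.Errorf("%q contains disallowed '..'", s)
			}
		default:
			return fmt.Errorf("%q contains invalid character %q", s, c)
		}
	}
	return nil
}
```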

core/lcopy.go

Lines changed: 1 addition & 1 deletion
@@ -409,7 +409,7 @@ func (lom *LOM) haveMpath(mpath string) bool {
 // returns mountpath to relocate or copy this lom, or nil if none required/available
 // return fixHrw = true when lom is currently misplaced
 // - checks hrw location first, and
-// - checks copies (if any) against the current configuation and available mountpaths;
+// - checks copies (if any) against the current configuration and available mountpaths;
 // - does not check `fstat` in either case (TODO: configurable or scrub);
 func (lom *LOM) ToMpath() (mi *fs.Mountpath, fixHrw bool) {
 	var (

docs/_posts/2022-08-15-dask-data-analysis.md

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ std_bed_count = df[' "Beds"'].std()
 dd.compute({"mean_price": mean_price, "mean_bed_count": bed_sum, "mean_size": mean_size, "std_price": std_price, "std_size", "std_bed_count": std_bed_count})
 ```

-Dask DataFrames also support more complex computations familiar to previous Pandas users such as calculating statistcs by group and filtering rows:
+Dask DataFrames also support more complex computations familiar to previous Pandas users such as calculating statistics by group and filtering rows:

 ```python
 # Mean list price of homes grouped by bed count

docs/_posts/2023-05-08-aisio-transforms-with-webdataset-pt-1.md

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ Wait for the job to complete:
 ais wait `YourSortJobID`
 ```

-Now we can see the ouptut shards as defined in the dSort job spec above, each containing a random set of the data samples.
+Now we can see the output shards as defined in the dSort job spec above, each containing a random set of the data samples.

 ```bash
 ais bucket ls ais://images -prefix shuffled

docs/_posts/2024-08-16-ishard.md

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ Despite all this, there is almost always a need to batch associated files that c

 This is where `ishard` comes in.

-Initial Sharding utility (`ishard`) is designed to create WebDataset-formatted shards from the original dataset without spliting computable samples. The ultimate goal is to allow users to treat AIStore as a vast data lake, where they can easily upload training data in its raw format, regardless of size and directory structure.
+Initial Sharding utility (`ishard`) is designed to create WebDataset-formatted shards from the original dataset without splitting computable samples. The ultimate goal is to allow users to treat AIStore as a vast data lake, where they can easily upload training data in its raw format, regardless of size and directory structure.

 Next, use `ishard` to perform the sharding pre-process correctly and optimally. The only question users need to address boils down to: **How should `ishard` associate samples with their corresponding annotations/labels?**

docs/_posts/2024-08-28-pytorch-integration.md

Lines changed: 2 additions & 2 deletions
@@ -75,7 +75,7 @@ for names, data in loader:
 # Process data (e.g training loop)
 ```

-Whats going on here in the code? First, the user declares a dataset that can read from a given bucket(*) and passes this dataset to PyTorch DataLoader. Since the DataLoader can take advantage of multiprocessing and workers, samples can be fetched in parallel from our dataset which are then yielded by the DataLoader to the training code.
+What's going on here in the code? First, the user declares a dataset that can read from a given bucket(*) and passes this dataset to PyTorch DataLoader. Since the DataLoader can take advantage of multiprocessing and workers, samples can be fetched in parallel from our dataset which are then yielded by the DataLoader to the training code.

 ![PyTorch Workers](/assets/pytorch-dataloading/pytorch-dataloading.gif)

@@ -314,4 +314,4 @@ Furthermore, we want data loading to be quick and easy for data scientists to us
 9. [WebDataset Website](https://webdataset.github.io/webdataset/webdataset/)
 10. [WebDataset Hugging Face](https://huggingface.co/docs/hub/datasets-webdataset)
 11. [Training CIFAR-10 with AIStore Notebook](https://github.com/NVIDIA/aistore/blob/main/python/examples/aisio-pytorch/cifar10_training_example.ipynb)
-12. [Training ResNet50 with AIStore Notebook](https://github.com/NVIDIA/aistore/blob/main/python/examples/aisio-pytorch/resnet50_wds_train.ipynb)
+12. [Training ResNet50 with AIStore Notebook](https://github.com/NVIDIA/aistore/blob/main/python/examples/aisio-pytorch/resnet50_wds_train.ipynb)

docs/_posts/2025-02-26-oci-object-native-vs-s3-api.md

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ Utilizing 64 1MB segments more than doubles the performance of the S3 backend's

 ## Conclusion

-This analysis hightlights how backend selection impacts performance in AIStore when working
+This analysis highlights how backend selection impacts performance in AIStore when working
 with OCI Object Storage. While the S3 API provides interoperability, the OCI Native API
 can outperform the S3 API in both reads and writes when properly tuned.

docs/aisloader.md

Lines changed: 1 addition & 1 deletion
@@ -447,7 +447,7 @@ Following is a brief illustrated sequence to enable detailed tracing, capture st
 **IMPORTANT NOTE:**
 > The amount of generated (and extremely detailed) metrics can put a strain on your StatsD server. That's exactly the reason for runtime switch to **toggle** HTTP tracing on/off. The example below shows how to do it (in particular, see `kill -HUP`).

-### 1. Run aisloader for 90s (32 workes, 100% write, sizes between 1KB and 1MB) with detailed tracing enabled:
+### 1. Run aisloader for 90s (32 workers, 100% write, sizes between 1KB and 1MB) with detailed tracing enabled:

 ```console
 $ aisloader -bucket=ais://abc -duration 90s -numworkers=32 -minsize=1K -maxsize=1M -pctput=50 --cleanup=false --trace-http=true
