Results 1 - 10 of 40 for walkFn (0.18 sec)
cmd/batch-handlers.go
    s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
    if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{
        Marker:   lastObject,
        Filter:   selectObj,
        AskDisks: walkQuorum,
    }); err != nil {
        cancel()
        // Do not need to retry if we can't list objects on source.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Jun 11 03:13:30 UTC 2024 - 56K bytes - Viewed (0)
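The MinIO excerpt above drives a channel-based object walk with WalkOptions; the callback-style walkFn shape the query refers to is the one in Go's standard library. A minimal sketch using filepath.WalkDir, with a made-up prefix filter standing in for the Prefix/Filter options above:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// listWithPrefix walks a local directory tree with a WalkDirFunc ("walkFn")
// and collects the files whose base name starts with prefix.
func listWithPrefix(root, prefix string) ([]string, error) {
	var out []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err // surface lstat/read errors instead of continuing
		}
		if d.IsDir() {
			return nil
		}
		if strings.HasPrefix(filepath.Base(path), prefix) {
			out = append(out, path)
		}
		return nil
	})
	return out, err
}

func main() {
	// "batch-" is an arbitrary example prefix, not taken from the MinIO code.
	files, err := listWithPrefix(".", "batch-")
	fmt.Println(files, err)
}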
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
// Walk the input and output dependencies of the Ops in `operations` to form
// the closure of Ops needed to evaluate 'operations'. Input dependencies are
// walked if 'predecessors' is true and output dependencies are walked if
// 'successors' is true. In either case, if a discovered Op is in the
// 'ops_to_avoid' set, then the dependency walking is terminated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0)
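The comment above describes forming the closure of ops by walking input and/or output dependencies while stopping at an avoid-set. A hedged Go sketch of the same idea over a toy dependency graph (the op type and its field names are invented for illustration):

package main

import "fmt"

// op is a toy graph node with input (preds) and output (succs) dependencies.
type op struct {
	name  string
	preds []*op
	succs []*op
}

// closure walks predecessors and/or successors of the seed ops, stops at
// anything in avoid, and returns the set of ops reached.
func closure(seeds []*op, predecessors, successors bool, avoid map[*op]bool) map[*op]bool {
	seen := map[*op]bool{}
	queue := append([]*op(nil), seeds...)
	for len(queue) > 0 {
		o := queue[0]
		queue = queue[1:]
		if seen[o] || avoid[o] {
			continue // already visited, or walking terminates at an avoided op
		}
		seen[o] = true
		if predecessors {
			queue = append(queue, o.preds...)
		}
		if successors {
			queue = append(queue, o.succs...)
		}
	}
	return seen
}

func main() {
	a := &op{name: "a"}
	b := &op{name: "b", preds: []*op{a}}
	c := &op{name: "c", preds: []*op{b}}
	a.succs, b.succs = []*op{b}, []*op{c}
	fmt.Println(len(closure([]*op{c}, true, false, nil))) // 3: c, b, a
}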
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
      tensorflow::SerializeMlirModule(module_for_func.get());
}

// Returns whether `op` or ops nested in `op` are outside compiled.
bool HasOutsideCompilationNested(Operation* op) {
  return op
      ->walk([&](Operation* walked_op) {
        if (op == walked_op) return WalkResult::advance();
        if (walked_op->hasAttr(kXlaOutsideCompilationAttr)) {
          return WalkResult::interrupt();
        }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0)
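The MLIR walk above advances or interrupts per visited op. The same early-exit pattern can be expressed with Go's walkFn and a sentinel error; a hedged sketch (the ".marked" suffix is made up):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// errFound plays the role of WalkResult::interrupt(): returning it from the
// walkFn stops the traversal early.
var errFound = errors.New("found")

// hasMarkedFile reports whether any file under root ends in ".marked"
// (a hypothetical suffix used only for this example).
func hasMarkedFile(root string) (bool, error) {
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() && strings.HasSuffix(path, ".marked") {
			return errFound // interrupt the walk at the first match
		}
		return nil // advance to the next entry
	})
	if errors.Is(err, errFound) {
		return true, nil
	}
	return false, err
}

func main() {
	ok, err := hasMarkedFile(".")
	fmt.Println(ok, err)
}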
src/cmd/compile/internal/ssa/debug.go
	// Populate "rval" with what we've computed.
	rval.Slots = state.slots
	rval.VarSlots = state.varSlots
	rval.Vars = state.vars
	rval.LocationLists = state.lists
}

// liveness walks the function in control flow order, calculating the start
// and end state of each block.
func (state *debugState) liveness() []*BlockDebug {
	blockLocs := make([]*BlockDebug, state.f.NumBlocks())
	counterTime := int32(1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0)
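The doc comment above says liveness walks the function in control flow order, computing a start and end state per block. A generic, hedged sketch of that kind of backward dataflow pass (toy block type, not the compiler's debugState):

package main

import "fmt"

// block is a toy CFG node with use/def sets and successors.
type block struct {
	succs    []*block
	use, def map[string]bool
	liveIn   map[string]bool
	liveOut  map[string]bool
}

// liveness iterates to a fixed point: liveOut = union of successors' liveIn,
// liveIn = use + (liveOut - def).
func liveness(blocks []*block) {
	for _, b := range blocks {
		b.liveIn, b.liveOut = map[string]bool{}, map[string]bool{}
	}
	for changed := true; changed; {
		changed = false
		for i := len(blocks) - 1; i >= 0; i-- { // reverse order converges faster
			b := blocks[i]
			for _, s := range b.succs {
				for v := range s.liveIn {
					if !b.liveOut[v] {
						b.liveOut[v] = true
						changed = true
					}
				}
			}
			for v := range b.liveOut {
				if !b.def[v] && !b.liveIn[v] {
					b.liveIn[v] = true
					changed = true
				}
			}
			for v := range b.use {
				if !b.liveIn[v] {
					b.liveIn[v] = true
					changed = true
				}
			}
		}
	}
}

func main() {
	exit := &block{use: map[string]bool{"x": true}}
	entry := &block{succs: []*block{exit}, def: map[string]bool{"y": true}}
	liveness([]*block{entry, exit})
	fmt.Println(entry.liveIn) // map[x:true]: x is live into the entry block
}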
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
  std::vector<mlir::func::FuncOp> to_delete_funcs;
  module.walk([&](mlir::vhlo::ReduceOpV1 reduce_op) {
    auto body = symbol_table.lookup<mlir::func::FuncOp>(
        reduce_op->getAttr("body").cast<mlir::FlatSymbolRefAttr>().getValue());
    InlineVhloOpRegion(reduce_op.getBody(), body);
    reduce_op->removeAttr("body");
    to_delete_funcs.push_back(body);
  });
  module.walk([&](mlir::vhlo::ReduceWindowOpV1 reduce_window_op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0)
src/runtime/traceback.go
package runtime

import (
	"internal/abi"
	"internal/bytealg"
	"internal/goarch"
	"internal/stringslite"
	"runtime/internal/sys"
	"unsafe"
)

// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0)
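traceback.go is the runtime's internal stack-walking machinery; from user code the same walk is exposed through the public runtime.Callers and runtime.CallersFrames API. A small sketch:

package main

import (
	"fmt"
	"runtime"
)

// printStack walks the calling goroutine's stack frame by frame.
func printStack() {
	pcs := make([]uintptr, 32)
	n := runtime.Callers(2, pcs) // skip runtime.Callers and printStack itself
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}

func main() {
	printStack()
}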
src/cmd/compile/internal/ssa/regalloc.go
		// New block. Clear candidate set.
		canLiveOnStack.clear()
		for _, c := range b.ControlValues() {
			if c.Uses == 1 && !opcodeTable[c.Op].generic {
				canLiveOnStack.add(c.ID)
			}
		}
		// Walking backwards.
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			if canLiveOnStack.contains(v.ID) {
				v.OnWasmStack = true
			} else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes - Viewed (0)
src/cmd/link/internal/ld/dwarf.go
	return expandGoroot(fname)
}

// writeDirFileTables emits the portion of the DWARF line table
// prologue containing the include directories and file names,
// described in section 6.2.4 of the DWARF 4 standard. It walks the
// filepaths for the unit to discover any common directories, which
// are emitted to the directory table first, then the file table is
// emitted after that.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 16:25:18 UTC 2024 - 72.4K bytes - Viewed (0)
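The comment above describes splitting a unit's filepaths into a directory table plus a file table that points into it. A hedged sketch of that split (the fileEntry layout is invented for illustration, not the DWARF encoding):

package main

import (
	"fmt"
	"path/filepath"
)

// fileEntry names a file by its base name plus an index into the
// directory table (illustrative layout only).
type fileEntry struct {
	dirIndex int
	name     string
}

// buildTables records each distinct directory once, then lists every
// file against its directory's index.
func buildTables(paths []string) (dirs []string, files []fileEntry) {
	dirIndex := map[string]int{}
	for _, p := range paths {
		dir, name := filepath.Split(p)
		dir = filepath.Clean(dir)
		idx, ok := dirIndex[dir]
		if !ok {
			idx = len(dirs)
			dirIndex[dir] = idx
			dirs = append(dirs, dir)
		}
		files = append(files, fileEntry{dirIndex: idx, name: name})
	}
	return dirs, files
}

func main() {
	dirs, files := buildTables([]string{"a/b/x.go", "a/b/y.go", "a/z.go"})
	fmt.Println(dirs)  // [a/b a]
	fmt.Println(files) // [{0 x.go} {0 y.go} {1 z.go}]
}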
staging/src/k8s.io/apiextensions-apiserver/test/integration/ratcheting_test.go
		nil, // No need for selectable fields
	)
	return func(new, old *unstructured.Unstructured) {
		_ = strategy.ValidateUpdate(context.TODO(), new, old)
	}, nil
}

// Recursively walks the provided directory and parses the YAML files into
// unstructured objects. If there is more than one object in a single file,
// they are all added to the returned slice.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 28 08:48:22 UTC 2024 - 59.5K bytes - Viewed (0)
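The Kubernetes test above walks a directory and splits multi-document YAML files into unstructured objects. A hedged sketch of the same shape using filepath.WalkDir and gopkg.in/yaml.v3 with plain maps instead of unstructured.Unstructured (the "testdata" path is a placeholder):

package main

import (
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

// loadYAMLDocs recursively walks root and decodes every document in every
// .yaml file; all documents of a multi-document file are appended.
func loadYAMLDocs(root string) ([]map[string]interface{}, error) {
	var docs []map[string]interface{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() || filepath.Ext(path) != ".yaml" {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		dec := yaml.NewDecoder(f)
		for {
			var doc map[string]interface{}
			if err := dec.Decode(&doc); err == io.EOF {
				return nil // no more documents in this file
			} else if err != nil {
				return err
			}
			docs = append(docs, doc)
		}
	})
	return docs, err
}

func main() {
	// "testdata" is a placeholder directory, not one from the k8s test.
	docs, err := loadYAMLDocs("testdata")
	fmt.Println(len(docs), err)
}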
src/runtime/map.go
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 57.6K bytes - Viewed (0)
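The comment above is about keeping iteration semantics stable while buckets grow and get copied; the user-visible consequence is that iteration order over a Go map is unspecified. A quick illustration:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}
	// Two range loops over the same map may visit keys in different
	// orders; the runtime randomizes the starting point per iteration.
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
}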