- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 68 for computations (0.15 sec)
-
src/cmd/go/main.go
if cfg.ExperimentErr != nil { base.Fatal(cfg.ExperimentErr) } } // Set environment (GOOS, GOARCH, etc) explicitly. // In theory all the commands we invoke should have // the same default computation of these as we do, // but in practice there might be skew. // This makes sure we all agree. cfg.OrigEnv = toolchain.FilterEnv(os.Environ()) cfg.CmdEnv = envcmd.MkEnv() for _, env := range cfg.CmdEnv {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:09:11 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// AssignVariableOps. std::unique_ptr<OperationPass<ModuleOp>> CreateUnfreezeConstantsPass(); // Creates a pass that duplicates constants that affect the shape of a tensor // after some computation. std::unique_ptr<OperationPass<func::FuncOp>> CreateDuplicateShapeDeterminingConstantsPass(); // Creates a pass that creates a RestoreV2 op in the initializer function with
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
se::Stream* stream = ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr; Allocator* allocator = ctx->device()->GetAllocator({}); // Computation output should always be a tuple. VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString(); VLOG(2) << "Result tuple shape (on device): " << output.on_device_shape().DebugString();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
std::vector<xla::ExecutionInput> execution_inputs, xla::ExecutableRunOptions run_options, xla::LocalExecutable* executable, OpKernelContext* ctx, se::DeviceMemoryAllocator* allocator) { VLOG(2) << "Executing Xla Computation."; Env* env = Env::Default(); auto start_time = env->NowMicros(); se::Stream* stream = GetStream(ctx); run_options.set_stream(GetStream(ctx)); run_options.set_allocator(allocator);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
src/cmd/compile/internal/types2/subst.go
} } case *Union: terms, copied := subst.termlist(t.terms) if copied { // term list substitution may introduce duplicate terms (unlikely but possible). // This is ok; lazy type set computation will determine the actual type set // in normal form. return &Union{terms} } case *Interface: methods, mcopied := subst.funcList(t.methods) embeddeds, ecopied := subst.typeList(t.embeddeds)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:04:07 UTC 2024 - 11K bytes - Viewed (0) -
src/go/types/subst.go
} } case *Union: terms, copied := subst.termlist(t.terms) if copied { // term list substitution may introduce duplicate terms (unlikely but possible). // This is ok; lazy type set computation will determine the actual type set // in normal form. return &Union{terms} } case *Interface: methods, mcopied := subst.funcList(t.methods) embeddeds, ecopied := subst.typeList(t.embeddeds)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:04:07 UTC 2024 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
// Internal tensors are those that do not flow in or out of the operation, // but instead are part of internal computation. As such, the operation's // implementation may manage its memory more efficiently. They are needed // however (i.e. not just an implementation detail) since they are part of the // computation, which may require relevant metadata such as quantization // parameters. intermediates:[int]; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
// // Computing gradient of the loss with respect to features gives us, // // backprop = (Exp(features) / Sum(Exp(features))) - labels // backprop = Softmax(features) - labels // // Computation of the reduction axis for the Sum op depends on whether the // input is a scalar or not. Restrict pattern to ranked inputs so that input to // the Sum op is also ranked.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
staging/src/k8s.io/api/flowcontrol/v1/types.go
// - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `nominalConcurrencyShares` (NCS) contributes to the computation of the // NominalConcurrencyLimit (NominalCL) of this level. // This is the number of execution seats available at this priority level. // This is used both for requests dispatched from this priority level
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 23 17:42:49 UTC 2024 - 31.1K bytes - Viewed (0) -
pkg/kube/inject/inject_test.go
for i, c := range cases { if c.setFlags != nil || c.inFilePath != "" { writeInjectionSettings(t, fmt.Sprintf("%s.%d", c.in, i), c.setFlags, c.inFilePath) } } } // Preload default settings. Computation here is expensive, so this speeds the tests up substantially defaultTemplate, defaultValues, defaultMesh := readInjectionSettings(t, "default") for i, c := range cases { i, c := i, c
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri May 31 20:35:11 UTC 2024 - 34.1K bytes - Viewed (0)