Results 1 - 10 of 169 for computations (0.18 sec)
src/vendor/golang.org/x/crypto/sha3/shake.go
// a customizable variant of SHAKE128.
// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
// desired. S is a customization byte string used for domain separation - two cSHAKE
// computations on same input with different S yield unrelated outputs.
// When N and S are both empty, this is equivalent to NewShake128.
func NewCShake128(N, S []byte) ShakeHash {
	if len(N) == 0 && len(S) == 0 {
		return NewShake128()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 5.4K bytes - Viewed (0)
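The domain-separation behavior this snippet documents is easy to demonstrate with the exported API. A minimal sketch, assuming only the golang.org/x/crypto/sha3 module; the message and customization strings are made up for illustration:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("the same input")

	// Same input, different customization strings S: the two
	// cSHAKE128 outputs are unrelated (domain separation).
	h1 := sha3.NewCShake128(nil, []byte("context A"))
	h2 := sha3.NewCShake128(nil, []byte("context B"))
	h1.Write(msg)
	h2.Write(msg)

	out1 := make([]byte, 16)
	out2 := make([]byte, 16)
	h1.Read(out1) // ShakeHash squeezes output via an io.Reader-style Read
	h2.Read(out2)
	fmt.Printf("%x\n%x\n", out1, out2)
}
```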
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc
          compilation_result->computation->proto(), xla::DebugOptions()));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<xla::HloModule> hlo_module,
      xla::HloModule::CreateFromProto(compilation_result->computation->proto(),
                                      hlo_module_config));
  std::string all_computations;
  for (auto computation : hlo_module->computations()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 00:40:46 UTC 2024 - 6.8K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
// from TF-Quantizer to stableHLO quantization
bool IsOpWithDataMovementTrait(Operation* op) {
  // Supported data movement ops. These ops do not perform any computations and
  // has one result operand.
  return isa<TF::IdentityOp, TF::CastOp, TF::ReshapeOp, TF::XlaShardingOp,
             TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp, TF::ExpandDimsOp,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h
  if (!operand_type.hasStaticShape() || !indices_type.hasStaticShape() ||
      !updates_type.hasStaticShape()) {
    return failure();
  }
  // Match the scatter computation against computations supported by TF.
  if (failed(MatchBinaryReduceFunction<BinaryOp>(
          scatter_op.getUpdateComputation()))) {
    return failure();
  }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0)
src/cmd/compile/internal/syntax/positions.go
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements helper functions for scope position computations.

package syntax

// StartPos returns the start position of n.
func StartPos(n Node) Pos {
	// Cases for nodes which don't need a correction are commented out.
	for m := n; ; {
		switch n := m.(type) {
		case nil:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 17:49:19 UTC 2024 - 6.5K bytes - Viewed (0)
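The syntax package shown here is internal to the Go compiler, but the exported go/ast machinery computes start and end positions through the analogous Pos/End methods. A minimal sketch using only standard-library packages (this is the exported analogue, not the internal StartPos helper itself):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nfunc f() int { return 1 + 2 }\n"
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Every ast.Node reports its start (Pos) and end (End) position;
	// the compiler's internal syntax.StartPos plays a similar role.
	decl := file.Decls[0]
	fmt.Println(fset.Position(decl.Pos()), "-", fset.Position(decl.End()))
}
```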
tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h
    mlir::TF::RuntimeDevices devices, std::string* host_device);

// Parses XLA compilation and execution devices from a tf_device.cluster and
// returns the host device for the head and tail computations. For TPU device,
// if the computation is replicated, GetDeviceAliasForHostOfLogicalCore(0) is
// returned instead.
mlir::LogicalResult GetHostDeviceOutsideComputation(
    mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 11.3K bytes - Viewed (0)
src/time/time.go
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
//
// The Add and Sub computations are oblivious to the choice of epoch.
//
// The presentation computations - year, month, minute, and so on - all
// rely heavily on division and modulus by positive constants. For
// calendrical calculations we want these divisions to round down, even
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 50.7K bytes - Viewed (0)
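The rounding-down requirement in this comment matters because Go's integer division truncates toward zero. A minimal sketch of the floor adjustment (daysSinceEpoch is a hypothetical helper for illustration, not the time package's internal code, which sidesteps the issue by computing in an unsigned domain):

```go
package main

import "fmt"

const secondsPerDay = 86400

// daysSinceEpoch floors sec/secondsPerDay. Plain integer division
// truncates toward zero, so -1/86400 == 0; for instants before the
// epoch we must subtract one to round down instead.
func daysSinceEpoch(sec int64) int64 {
	d := sec / secondsPerDay
	if sec%secondsPerDay < 0 {
		d--
	}
	return d
}

func main() {
	fmt.Println(daysSinceEpoch(1))  // 0
	fmt.Println(daysSinceEpoch(-1)) // -1, not 0
}
```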
tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc
      location, output_type, concat_dimension_op.getOutput(), inputs);
}

// For tile sharded inputs to TPU computation, inject split op between the
// input values and TPU computation so that tiled input values are passed in
// as inputs to TPU computations. If more than one dimension is sharded, then
// a tree of connected split ops are added before tf_device.parallel_execute op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:28:13 UTC 2024 - 34K bytes - Viewed (0)
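As a language-neutral illustration of the split tree this comment describes (a hypothetical Go sketch of the tiling arithmetic only, not the MLIR pass itself): sharding two dimensions nests one split per dimension, so an [8,4] input tiled 2x2 yields four [4,2] blocks.

```go
package main

import "fmt"

// tileOrigins mirrors the tree of split ops: an outer split along
// dim 0 feeds an inner split along dim 1, producing one tile per
// (i, j) pair in row-major order.
func tileOrigins(rows, cols, splitRows, splitCols int) [][2]int {
	var origins [][2]int
	for i := 0; i < splitRows; i++ { // first-level split on dim 0
		for j := 0; j < splitCols; j++ { // second-level split on dim 1
			origins = append(origins,
				[2]int{i * rows / splitRows, j * cols / splitCols})
		}
	}
	return origins
}

func main() {
	// An [8,4] input sharded 2x2 yields four [4,2] tiles.
	fmt.Println(tileOrigins(8, 4, 2, 2)) // [[0 0] [0 2] [4 0] [4 2]]
}
```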
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
let summary = "Decompose composite resource variable operations into primitive Read/AssignVariableOp and raw computation.";

let description = [{
  A pass that decomposes composite resource operations into primitive ones like
  ReadVariableOp, AssignVariableOp and other computations to facilitate
  transformations like resource op lifting. For example:

  ```mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
// Creates a pass that lifts operations on external resource variables from
// device computation nested in `tf_device::LaunchOp` out so that resource
// variable load operations are all before device computation while resource
// variable store operations are all after device computation. After this pass,
// device computation no longer interacts with external resource variables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes - Viewed (0)