- Sort Score
- Result 10 results
- Languages All
Results 71 - 80 of 237 for Operands (0.15 sec)
-
src/cmd/internal/obj/link.go
type Prog struct { Ctxt *Link // linker context Link *Prog // next Prog in linked list From Addr // first source operand RestArgs []AddrPos // can pack any operands that do not fit into {Prog.From, Prog.To}, same kinds of operands are saved in order To Addr // destination operand (second is RegTo2 below) Pool *Prog // constant pool entry, for arm,arm64 back ends Forwd *Prog // for x86 back end
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 33.1K bytes - Viewed (0) -
src/math/big/float.go
// This file implements multi-precision floating-point numbers. // Like in the GNU MPFR library (https://www.mpfr.org/), operands // can be of mixed precision. Unlike MPFR, the rounding mode is // not specified with each operation, but with each operand. The // rounding mode of the result operand determines the rounding // mode of an operation. This is a from-scratch implementation. package big import ( "fmt" "math"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jun 06 15:46:54 UTC 2024 - 44.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.cc
PropagateUsage(user_op, operand_index, value, work_list, arguments_to_erase); } } // Traces usage of 'var_handle_op' and replaces its usage with constant value // 'value'. // All op operands updates are captured in 'arguments_to_erase'. void ReplaceVarWithConstant( TF::VarHandleOp var_handle_op, ElementsAttr value, llvm::MapVector<Operation*, llvm::SmallVector<unsigned int, 4>>*
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 09:56:53 UTC 2024 - 19.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc
<< "requires " << num_cores_per_replica << " operands but found " << pi->getNumOperands(); for (auto operand : pi.getInputs()) { if (auto ri = llvm::dyn_cast_or_null<mlir::TF::TPUReplicatedInputOp>( operand.getDefiningOp())) { if (!seen_ops.contains(ri)) { seen_ops.insert(ri);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 39.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
} // This pass should always be at the end of the model // conversion (even after quantization). Some TFL ops like unidirectional // sequence lstm will have stateful operands and some optimization passes // will merge those operands if they have identical values & types. However, // it's not desired by TFL. This pass serves as a "fix" pass to split the // merged inputs until we have 1st class variable support or reuse
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
src/cmd/compile/internal/types2/call.go
if n := len(elist); n == 1 { xlist, _ = check.multiExpr(elist[0], false) } else if n > 1 { // multiple (possibly invalid) values xlist = make([]*operand, n) for i, e := range elist { var x operand check.expr(nil, &x, e) xlist[i] = &x } } return } // genericExprList is like exprList but result operands may be uninstantiated or partially
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 19:19:55 UTC 2024 - 31.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc
TFConstantFoldInterface(Dialect *dialect) : DialectFoldInterface(dialect) {} LogicalResult fold(Operation *op, ArrayRef<Attribute> operands, SmallVectorImpl<OpFoldResult> &results) const final { return TensorFlowDialect::constantFold(op, operands, results); } }; // Helper function that implements the multi-device inlining policy behavior
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc
// Look up function definition from module. Block& function_block = func.front(); sharding_for_args.reserve(function_block.getNumArguments()); // Iterate through operands of `cluster_func`. // The computation operand can either be: // 1) a TPUPartitionedInput Op if the input has a non-resource type; // 2) a ReadVariableOp else. // // Iterate through input arguments to the entry block of
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 28.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
// BatchMatMul op patterns. //===----------------------------------------------------------------------===// // Static shaped operands in a legal BatchMatMul op will have matching batch // dimensions and can be upgraded to the BatchMatMulV2 op. Canonicalizing // dynamically shaped operands is not correct as that will execute ops that // have non-matching batch dimensions but are broadcastable which should fail // with V1.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// A heuristic to determine whether the scales needs to be from operands or // from results for the ops with the `SameOperandsAndResultsScale` property. // The current implementation is based on the number of operands. static bool PreferResultScale(Operation* op) { int float_operands = 0; for (auto operand : op->getOperands()) { if (auto operand_type = dyn_cast<ShapedType>(operand.getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0)