- Sort Score
- Results per page: 10 results
- Languages All
Results 101 - 110 of 137 for computations (0.3 sec)
-
tensorflow/compiler/jit/ops/xla_ops.cc
for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->input(0)); } return absl::OkStatus(); }) .Doc( "Operator that connects the output of an XLA computation to other " "consumer graph nodes."); REGISTER_OP("_XlaCompile") .Input("constants: Tconstants") .Attr("Tconstants: list(type) >= 0") .Attr("must_compile: bool") .Input("args: Targs")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 09:08:06 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_avg_pool_patterns.td
// Replaces aten.avg_pool2d with ceil mode with (T -> tfl.pad -> tfl.average_pool_2d -> mul -> T). Multiplies by a constant // which corrects the overcounting of divisors that would occur if doing this computation on a padded tensor with ceil mode off. def LegalizeAvgPoolCeilModeTrue: Pat< (MHLO_CompositeOp:$old_val (variadic $a_input),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:16:05 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
// the quantized inputs, performing the operation in the expressed type, then // requantizing if a quantized output is required. // // The motivation behind these changes is for Dialects that assume only float // or quantized computation, and do not support a mixture of these types on // dense operations. Decomposition allows TFLite to be compiled to these // dialects, such as TOSA. #include <utility>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
src/runtime/mksizeclasses.go
// Let F ← log₂(d) and c = 1. // else // Let F ← N + L where L is the smallest integer // such that d ≤ (2^(N+L) mod d) + 2^L. // end if // // [1] "Faster Remainder by Direct Computation: Applications to // Compilers and Software Libraries" Daniel Lemire, Owen Kaser, // Nathan Kurz arXiv:1902.01961 // // To minimize the risk of introducing errors, we implement the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:31:27 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h
// as the reduction. Value iota = reduce_op.getInputs().back(); if (!MatchIota(reduce_op.getDimensions(), iota)) return failure(); // Match the reduction computation. const bool is_float = mlir::isa<FloatType>(operand_init.getElementType()); if (failed(MatchReduceToArgMinMaxType1(reduce_op, is_float, is_argmax)) && failed(MatchReduceToArgMinMaxType2(reduce_op, is_argmax)))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization.cc
"means that a shape or dimension argument could not be evaluated at " "compile time, usually because the value of the argument depends on a " "parameter to the computation, on a variable, or on a stateful operation " "such as a random number generator."; // TODO(b/282188914) remove the operations to skip once tests are fixed. static const DenseSet<mlir::TypeID>* operations_to_skip =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
src/vendor/golang.org/x/net/idna/punycode.go
package idna // This file implements the Punycode algorithm from RFC 3492. import ( "math" "strings" "unicode/utf8" ) // These parameter values are specified in section 5. // // All computation is done with int32s, so that overflow behavior is identical // regardless of whether int is 32-bit or 64-bit. const ( base int32 = 36 damp int32 = 700 initialBias int32 = 72 initialN int32 = 128
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 09 20:10:36 UTC 2021 - 4.6K bytes - Viewed (0) -
tensorflow/c/kernels/histogram_summary_op.cc
using Safe_TF_TensorPtr = std::unique_ptr<TF_Tensor, TFTensorDeleter>; using Safe_TF_StatusPtr = std::unique_ptr<TF_Status, TFStatusDeleter>; // Used to pass the operation node name from kernel construction to // kernel computation. struct HistogramSummaryOp { std::string op_node_name; }; void* HistogramSummaryOp_Create(TF_OpKernelConstruction* ctx) { HistogramSummaryOp* kernel = new HistogramSummaryOp;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str); // Parses "input_sharding_configuration" attribute and returns a list where i-th // element is a list of mlir::Value's which represent inputs for the TPU // computation corresponding to i-th logical device. If the attribute does not // exist, all inputs are placed on logical core 0. mlir::LogicalResult ExtractInputsForLogicalDevices(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 28 22:18:34 UTC 2024 - 6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
%3 = "tf.BatchMatMulV2"(%input, %2) { attr_map = "adj_x:0,adj_y:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } // DepthwiseConv2D with float computation func.func private @internal_depthwise_conv2d_fn( %input : tensor<*xf32>, %filter : tensor<*xi8>) -> tensor<*xf32> { // Use identity op to avoid the filter being constant-folded.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0)