Results 11 - 20 of 20 for Platen (0.07 sec)
- tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc

  // Guarantee all functions have one use, which enables more exact shape
  // inference.
  pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
  // Run shape inference so that tf_executor/tf_device ops created later
  // will likely inherit more concrete types.
  pm.addPass(mlir::TF::CreateTFShapeInferencePass());
  pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass());
  pm.addNestedPass<FuncOp>(
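  The ordering here is the point: duplicating multi-use functions first lets the
  subsequent shape-inference pass propagate concrete types without reconciling
  conflicting call sites. A minimal sketch of wiring just those two passes, using
  the pass constructors named in the snippet (the surrounding pipeline setup is
  assumed):

  #include "mlir/Pass/PassManager.h"
  #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"

  // Run the one-use guarantee before shape inference so every function has
  // exactly one caller whose operand types can be propagated precisely.
  void AddShapeInferencePasses(mlir::OpPassManager& pm) {
    pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
    pm.addPass(mlir::TF::CreateTFShapeInferencePass());
  }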
- tensorflow/c/eager/unified_api_testutil.cc

  // Returning null tensors from a tf.function is not supported, so we keep
  // track of which indices in the model's outputs are nullptr in this set.
  // The FunctionDef only outputs the non-null tensors. We later pad the
  // function op outputs to have nullptrs at the `null_indices`.
  absl::flat_hash_set<int> null_indices;
  {
    AbstractContextPtr func_ctx(BuildFunction(fn_name));
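  The pad-back step the comment describes is easy to state on its own: the
  function returns only non-null tensors, and the recorded indices say where
  to reinsert the nullptrs. A minimal standalone sketch (PadOutputs is an
  illustrative name, not the testutil's API):

  #include <vector>
  #include "absl/container/flat_hash_set.h"

  // Rebuild the full output list, reinserting nullptr at the recorded
  // positions; the non-null results are consumed in order.
  template <typename T>
  std::vector<T*> PadOutputs(const std::vector<T*>& compact,
                             const absl::flat_hash_set<int>& null_indices,
                             int total) {
    std::vector<T*> full(total, nullptr);
    int next = 0;
    for (int i = 0; i < total; ++i) {
      if (!null_indices.contains(i)) full[i] = compact[next++];
    }
    return full;
  }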
- tensorflow/compiler/jit/cluster_scoping_pass.cc

  // Node_Y will receive both scopes "unstage" and "stage", while Node_X
  // receives only scope "stage". The semantics of scope "unstage" are
  // preserved although scope "stage" is later appended. As a result, Node_X
  // and Node_Y will be put into different clusters.
  //
  //   Unstage -> Node_Y (scope "unstage & stage")
  //                |
  //                V
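  How a later scope is appended without erasing the earlier one can be sketched
  as plain string composition; the "&" separator follows the diagram above, and
  AppendScope is a hypothetical helper, not the pass's API:

  #include <string>

  // Scopes accumulate with a separator, so a node already carrying
  // "unstage" becomes "unstage&stage" rather than losing its original
  // scope; clustering then compares the full accumulated strings.
  std::string AppendScope(const std::string& existing,
                          const std::string& scope) {
    return existing.empty() ? scope : existing + "&" + scope;
  }
  // AppendScope("unstage", "stage") == "unstage&stage"  (Node_Y)
  // AppendScope("", "stage")        == "stage"          (Node_X)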
- tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h

  // Generate both a Max and an ArgMax, as the mhlo op returns both while in
  // TF we have separate ops for them. If only one of them is used then the
  // other one will be garbage collected later.
  if (!mlir::isa<ShapedType>(operand.getType())) return failure();
  auto operand_type = mlir::cast<ShapedType>(operand.getType());
  if (operand_type.getElementType().isInteger(1)) {
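  The guard in the snippet reads as a single predicate: bail unless the operand
  is a shaped type, then special-case i1 elements. A sketch using the same MLIR
  casts (IsBooleanTensor is an illustrative name):

  #include "mlir/IR/BuiltinTypes.h"
  #include "mlir/IR/Value.h"

  // True when the operand is a tensor (ranked or unranked) of i1 elements,
  // the case the legalization above handles specially.
  bool IsBooleanTensor(mlir::Value operand) {
    auto type = mlir::dyn_cast<mlir::ShapedType>(operand.getType());
    return type && type.getElementType().isInteger(1);
  }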
- tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc

    return false;
  }
  // If any of the result types are variants, don't try to constant fold
  // them. This creates opaque variant constants which lose information and
  // would require "raising" later.
  for (const Type type : inst->getResultTypes()) {
    if (const TensorType tensor_type = mlir::dyn_cast<TensorType>(type)) {
      if (mlir::isa<VariantType>(tensor_type.getElementType())) {
        return false;
      }
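  The same check written as a standalone predicate, assuming the TF dialect's
  VariantType from tf_types.h (a sketch, not the utility's actual factoring):

  #include "mlir/IR/BuiltinTypes.h"
  #include "mlir/IR/Operation.h"
  #include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"

  // Any result that is a tensor of variant elements disqualifies the op
  // from constant folding, since folding would produce opaque constants.
  bool HasVariantResult(mlir::Operation* inst) {
    for (mlir::Type type : inst->getResultTypes()) {
      if (auto tensor_type = mlir::dyn_cast<mlir::TensorType>(type)) {
        if (mlir::isa<mlir::TF::VariantType>(tensor_type.getElementType()))
          return true;
      }
    }
    return false;
  }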
- tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

  // with an appropriate shape to match the shape of the XlaDotV2 result.
  // We didn't apply XlaEinsum or XlaDotV2 for this work, since it would lose
  // the chance for constant folding later. We could try to add some
  // postprocessing passes later to further optimize the graph after constant
  // folding.
  Value CreateZeroPointPartialOffsetXlaDotV2(
      OpBuilder &builder, Location loc, Value tensor,
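  For context on what a zero-point "partial offset" is: in a quantized int8 dot,
  sum_i (a_i - za)(b_i - zb) expands into sum_i a_i*b_i minus zb*sum_i a_i minus
  za*sum_i b_i plus K*za*zb, so zb*sum_i a_i is one correction term computable
  per output element. A purely illustrative scalar sketch (names and layout are
  assumptions about what the pass computes, not its API):

  #include <cstdint>
  #include <vector>

  // One output element's correction term: the rhs zero point times the sum
  // of the lhs values along the contracting dimension, to be subtracted
  // from the raw int32 accumulator.
  int32_t ZeroPointPartialOffset(const std::vector<int8_t>& lhs_row,
                                 int32_t rhs_zero_point) {
    int32_t sum = 0;
    for (int8_t v : lhs_row) sum += v;
    return rhs_zero_point * sum;
  }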
- tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc

    if (stateless && stateless.getValue())
      *node_def->mutable_op() = "Stateless" + node_def->op();
  }
  // Add inputs to the NodeDef based on the number of operands. This is
  // required as later, when edges are added to the Node using
  // Graph::AddEdge, the associated NodeDef is not updated.
  for (int i = 0, e = inst->getNumOperands(); i < e; ++i) {
    node_def->add_input();
  }
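  The pre-sizing trick is mechanical: add empty input slots now, fill them by
  index once the edges are known, because Graph::AddEdge never touches the
  NodeDef. A sketch against the NodeDef proto (the helper name is illustrative):

  #include "tensorflow/core/framework/node_def.pb.h"

  // Pre-size NodeDef.input with empty slots so they can later be filled in
  // by index once the graph edges exist.
  void ReserveNodeDefInputs(tensorflow::NodeDef* node_def, int num_operands) {
    for (int i = 0; i < num_operands; ++i) node_def->add_input();
  }
  // Later, per edge: node_def->set_input(operand_index, "producer:0");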
- tensorflow/c/kernels_experimental.cc

  // because a race condition can happen between this and another thread
  // that turns off some variable's `copy_on_read_mode` after this thread
  // enables sparse access; when a later function sees `copy_on_read_mode`
  // is off, it will try to lock the variable again to update
  // `copy_on_read_mode` and cause a deadlock, since the variable mutex is
  // non-reentrant.
  for (auto* var : vars) {
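  The usual way to avoid this class of deadlock is to take every variable's
  non-reentrant mutex exactly once, up front, in a canonical order, and flip the
  modes while still holding all the locks. A generic sketch of that pattern,
  not the TF kernel API:

  #include <algorithm>
  #include <functional>
  #include <mutex>
  #include <vector>

  struct Var {
    std::mutex mu;  // non-reentrant: relocking on the same thread deadlocks
    bool copy_on_read_mode = false;
  };

  // Acquire all locks once, in address order, so no two threads can hold
  // them in conflicting orders; then no later relock is ever needed.
  void EnableSparseAccess(std::vector<Var*>& vars) {
    std::sort(vars.begin(), vars.end(), std::less<Var*>());
    std::vector<std::unique_lock<std::mutex>> locks;
    locks.reserve(vars.size());
    for (Var* var : vars) locks.emplace_back(var->mu);
    for (Var* var : vars) var->copy_on_read_mode = true;
  }  // all locks released here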
- tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

  Value n_plus_y = rewriter.create<AddOp>(loc, iotaN, y);
  // GatherOp will happily let us index out-of-bounds values, but those
  // values are undefined, so we mask them later. Set up the boolean
  // expression that tells us which entries in the output shape are out of
  // bounds and thus become the padding_value.
  Value x_in_bounds = rewriter.create<AndOp>(
      loc,
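  In scalar form the mask-and-select idea reduces to gathering freely, then
  substituting padding_value wherever the index was out of bounds rather than
  trusting the undefined gathered data; a minimal sketch (MaskedGather is an
  illustrative name):

  #include <cstdint>
  #include <vector>

  // Gather with a bounds mask: out-of-range lanes yield padding_value
  // instead of whatever undefined data the gather would have produced.
  std::vector<int64_t> MaskedGather(const std::vector<int64_t>& data,
                                    const std::vector<int64_t>& indices,
                                    int64_t padding_value) {
    std::vector<int64_t> out;
    out.reserve(indices.size());
    for (int64_t idx : indices) {
      bool in_bounds = idx >= 0 && idx < static_cast<int64_t>(data.size());
      out.push_back(in_bounds ? data[idx] : padding_value);
    }
    return out;
  }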
- tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    Creates a dataset that applies `f` to the outputs of `input_dataset`.
  }];
  let description = [{
    Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
    Dataset variant, and FlatMapDataset will flatten successive results into
    a single Dataset.
  }];
  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Variadic<TF_Tensor>:$other_arguments,
    SymbolRefAttr:$f,
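  The MapDataset/FlatMapDataset distinction in the description is just map
  versus flat-map: `f` yields a whole sequence per element, and successive
  sequences are concatenated in order rather than nested. A container-level
  sketch of that contract (FlatMap here is illustrative, not the dataset
  kernel):

  #include <vector>

  // Flat-map: apply f, which returns a sequence per element, and splice
  // each returned sequence onto the end of the single flattened result.
  template <typename T, typename F>
  std::vector<T> FlatMap(const std::vector<T>& input, F f) {
    std::vector<T> out;
    for (const T& x : input) {
      std::vector<T> produced = f(x);
      out.insert(out.end(), produced.begin(), produced.end());
    }
    return out;
  }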