Results 1 - 8 of 8 for Platen (0.23 sec)
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
    rhs, DenseIntElementsAttr::get(
             RankedTensorType::get({rhs_rank}, rewriter.getI64Type()),
             rhs_permutation));
// Reshapes lhs to flatten out_dimensions and contracting_dimensions.
llvm::SmallVector<int64_t, 4> lhs_flattened_shape = Concat<int64_t>(
    lhs_dot_dimensions_info.batch_dimensions().SizesArray(),
    llvm::ArrayRef<int64_t>{
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes
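To illustrate the flattening step referenced in this excerpt, here is a minimal standard-C++ sketch (hypothetical names, not the pass's Concat helper) that builds the reshaped LHS shape as the batch dims followed by one flattened "out" dim and one flattened contracting dim:

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-in for the shape concatenation in dot_general.cc: the
// reshaped LHS keeps the batch dims, then a single flattened out dim, then a
// single flattened contracting dim.
int64_t Product(const std::vector<int64_t>& dims) {
  return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                         std::multiplies<int64_t>());
}

std::vector<int64_t> FlattenedLhsShape(const std::vector<int64_t>& batch,
                                       const std::vector<int64_t>& out,
                                       const std::vector<int64_t>& contracting) {
  std::vector<int64_t> shape(batch);
  shape.push_back(Product(out));          // flatten out_dimensions
  shape.push_back(Product(contracting));  // flatten contracting_dimensions
  return shape;
}

int main() {
  // e.g. an lhs of shape [2, 3, 4, 5] with out dims {3, 4}, contracting {5}
  for (int64_t d : FlattenedLhsShape({2}, {3, 4}, {5})) std::cout << d << ' ';
  // prints: 2 12 5
}

With out dims {3, 4} and one contracting dim {5}, a [2, 3, 4, 5] operand reshapes to [2, 12, 5] before the batched matmul.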
tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
    llvm::StringRef equation, RankedTensorType lhs_ty) {
  llvm::StringRef lhs;
  llvm::StringRef out;
  std::tie(lhs, out) = equation.split("->");
  if (lhs.empty() || out.empty()) return std::nullopt;

  // Try to flatten the "..." if possible.
  int lhs_named_label, rhs_named_label;

  // following rhs and rhs_ty variables are non-functional here only created to
  // comply with the existing API
  llvm::StringRef rhs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.3K bytes
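The split on "->" above uses llvm::StringRef::split; a self-contained sketch of the same parsing step using only the standard library (hypothetical function name):

#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Split an einsum equation into its input and output parts at "->",
// mirroring the equation.split("->") call in einsum.cc.
std::optional<std::pair<std::string, std::string>> SplitEquation(
    const std::string& equation) {
  const auto pos = equation.find("->");
  if (pos == std::string::npos) return std::nullopt;
  std::string inputs = equation.substr(0, pos);
  std::string output = equation.substr(pos + 2);
  if (inputs.empty() || output.empty()) return std::nullopt;
  return std::make_pair(inputs, output);
}

int main() {
  if (auto parts = SplitEquation("...ij,jk->...ik")) {
    std::cout << "inputs: " << parts->first << "  output: " << parts->second << '\n';
  }
}

As in the excerpt, an equation with a missing input or output side is rejected with std::nullopt.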
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
  // Add inline pass.
  pass_manager.addPass(mlir::createInlinerPass());

  // Expands mhlo.tuple ops.
  pass_manager.addPass(
      mlir::mhlo::createExpandHloTuplesPass(entry_function_name.str()));

  // Flatten tuples for control flows.
  pass_manager.addNestedPass<mlir::func::FuncOp>(
      mlir::mhlo::createFlattenTuplePass());

  mlir::odml::AddMhloOptimizationPasses(
      pass_manager,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes
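The excerpt registers passes in a fixed order (inline, expand tuples, flatten tuples). The toy model below (standard C++ only, not the MLIR PassManager API) shows what that ordering contract means: passes run over the module strictly in the order they were added.

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy pipeline: each "pass" is a function over a module description, run
// strictly in registration order, like the pass_manager.addPass(...) calls.
class ToyPassManager {
 public:
  void addPass(std::string name, std::function<void(std::string&)> pass) {
    passes_.emplace_back(std::move(name), std::move(pass));
  }
  void run(std::string& module) {
    for (auto& [name, pass] : passes_) {
      std::cout << "running: " << name << '\n';
      pass(module);
    }
  }
 private:
  std::vector<std::pair<std::string, std::function<void(std::string&)>>> passes_;
};

int main() {
  std::string module = "mhlo module with tuples";
  ToyPassManager pm;
  pm.addPass("inliner",        [](std::string&) { /* inline calls */ });
  pm.addPass("expand-tuples",  [](std::string&) { /* expand mhlo.tuple ops */ });
  pm.addPass("flatten-tuples", [](std::string&) { /* flatten tuples in control flow */ });
  pm.run(module);
}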
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
}

// Determine if op commutes with transposes. Requires a strict
// definition of Elementwise, all i/o shapes and types must be same-rank
// broadcastable and fully static. Consider moving this into attribute later.
bool IsElementwise(Operation *op) {
  if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp,
                  TFL::MaximumOp, TFL::MinimumOp>(op))) {
    return false;
  }
  auto opr1_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes
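The "same-rank ... and fully static" precondition mentioned in the comment can be pictured with this standalone sketch (standard C++, with -1 standing in for a dynamic dimension; not the actual TFL type checks):

#include <cstdint>
#include <iostream>
#include <vector>

// Returns true only if every shape has the same rank and no dynamic (-1) dims,
// the static-shape precondition described for IsElementwise().
bool SameRankFullyStatic(const std::vector<std::vector<int64_t>>& shapes) {
  if (shapes.empty()) return false;
  const size_t rank = shapes.front().size();
  for (const auto& shape : shapes) {
    if (shape.size() != rank) return false;
    for (int64_t dim : shape)
      if (dim < 0) return false;  // dynamic dimension
  }
  return true;
}

int main() {
  std::cout << SameRankFullyStatic({{1, 8, 8, 3}, {1, 8, 8, 3}}) << '\n';   // 1
  std::cout << SameRankFullyStatic({{1, -1, 8, 3}, {1, 8, 8, 3}}) << '\n';  // 0
}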
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
  // Guarantee all functions have one use, which enables more exact shape
  // inference.
  pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());

  // Run shape inference so that tf_executor/tf_device ops created later will
  // likely to inherit more concrete types.
  pm.addPass(mlir::TF::CreateTFShapeInferencePass());

  pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass());
  pm.addNestedPass<FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes
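Why does guaranteeing one use per function help shape inference? If two callers pass different concrete shapes to a shared callee, the callee's argument type must stay unrefined; once every caller gets its own clone, each clone can adopt its caller's shape. A toy illustration of that idea (hypothetical, standard C++ only):

#include <iostream>
#include <string>
#include <vector>

// A callee shared by callers with different argument shapes can only be typed
// with an unknown shape; duplicating it so every caller has its own copy lets
// each copy take a concrete shape.
struct CallSite { std::string callee; std::vector<int> arg_shape; };

int main() {
  std::vector<CallSite> calls = {{"f", {2, 3}}, {"f", {4, 5}}};
  // After a guarantee-one-use style duplication, each call targets its own clone:
  for (size_t i = 0; i < calls.size(); ++i) {
    std::cout << "f_" << i << " specialized to shape [";
    for (int d : calls[i].arg_shape) std::cout << d << ' ';
    std::cout << "]\n";
  }
}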
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
// with appropriate shape to match with the shape of XlaDotV2 result.
// We didn't apply XlaEinsum or XlaDotV2 for this work, since it would loose
// the chance for constant folding later. We could try to add some
// postprocessing passes later to further optimize the graph after constant
// folding.
Value CreateZeroPointPartialOffsetXlaDotV2(
    OpBuilder &builder, Location loc, Value tensor,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes
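For background on what a zero-point offset of this kind usually computes (a sketch under that assumption, not the pass's actual implementation): in a quantized dot, the correction term that depends only on the weights, lhs_zero_point * sum_k w[k][n], can be precomputed per output column and subtracted from the raw integer product.

#include <cstdint>
#include <iostream>
#include <vector>

// Precompute the lhs-zero-point offset z_x * sum_k w[k][n] for each output
// column n of a K x N weight matrix. Subtracting this (and the analogous
// rhs-side term) from the raw integer dot recovers the corrected result.
std::vector<int64_t> ZeroPointPartialOffset(
    const std::vector<std::vector<int32_t>>& w, int32_t lhs_zero_point) {
  const size_t n_cols = w.empty() ? 0 : w.front().size();
  std::vector<int64_t> offset(n_cols, 0);
  for (const auto& row : w)
    for (size_t n = 0; n < n_cols; ++n) offset[n] += row[n];
  for (auto& v : offset) v *= lhs_zero_point;
  return offset;
}

int main() {
  // 2x2 weights, lhs zero point 3: column sums {4, 6} scaled by 3 -> {12, 18}
  for (int64_t v : ZeroPointPartialOffset({{1, 2}, {3, 4}}, 3)) std::cout << v << ' ';
}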
tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc
  if (stateless && stateless.getValue())
    *node_def->mutable_op() = "Stateless" + node_def->op();
}

// Add inputs to the NodeDef based on the number of operands. This is required
// as later when edges are added to the Node using Graph::AddEdge the
// associated NodeDef is not updated.
for (int i = 0, e = inst->getNumOperands(); i < e; ++i) {
  node_def->add_input();
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 19.7K bytes
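A toy version of the two steps in this excerpt (plain structs standing in for the real NodeDef proto): prefix the op name when the stateless attribute is set, then pre-allocate one empty input slot per operand so that later edge insertion can fill slots by index instead of appending.

#include <iostream>
#include <string>
#include <vector>

// Minimal stand-ins for NodeDef and an op with operands.
struct ToyNodeDef { std::string op; std::vector<std::string> inputs; };

void Export(bool is_stateless, int num_operands, ToyNodeDef& node_def) {
  if (is_stateless) node_def.op = "Stateless" + node_def.op;
  // Reserve one (still empty) input per operand; edges added later overwrite
  // these slots rather than append, matching the comment in the excerpt.
  for (int i = 0; i < num_operands; ++i) node_def.inputs.emplace_back();
}

int main() {
  ToyNodeDef node{"While", {}};
  Export(/*is_stateless=*/true, /*num_operands=*/3, node);
  std::cout << node.op << " with " << node.inputs.size() << " input slots\n";
  // prints: StatelessWhile with 3 input slots
}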
tensorflow/c/kernels_experimental.cc
// because a race condition can happen between this and another thread that
// turns off some variable's `copy_on_read_mode` after this thread enables
// sparse access; when a later function sees `copy_on_read_mode` is off, it
// will try to lock the variable again for updating `copy_on_read_mode` and
// cause the deadlock, since the variable mutex is non-re-entrant.
for (auto* var : vars) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:12:29 UTC 2024 - 30.9K bytes
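The deadlock described in the comment is the classic one avoided by taking every variable's lock up front in a single global order; a standard-C++ sketch of that idea (not the TensorFlow implementation, which locks through the variable's own mutex API):

#include <algorithm>
#include <functional>
#include <iostream>
#include <mutex>
#include <vector>

struct Var { std::mutex mu; bool copy_on_read_mode = false; };

// Lock all variables in one consistent order (here: by address) before
// touching copy_on_read_mode, so two threads can never each hold one
// non-re-entrant mutex while waiting on the other's.
void EnableCopyOnReadForAll(std::vector<Var*>& vars) {
  std::sort(vars.begin(), vars.end(), std::less<Var*>());
  vars.erase(std::unique(vars.begin(), vars.end()), vars.end());  // avoid double-lock
  for (Var* v : vars) v->mu.lock();
  for (Var* v : vars) v->copy_on_read_mode = true;
  for (Var* v : vars) v->mu.unlock();
}

int main() {
  Var a, b;
  std::vector<Var*> vars = {&b, &a, &b};
  EnableCopyOnReadForAll(vars);
  std::cout << a.copy_on_read_mode << b.copy_on_read_mode << '\n';  // prints: 11
}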