- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 12 for Platen (0.12 sec)
-
tensorflow/c/experimental/saved_model/core/revived_types/partially_revived_objects.cc
// Additionally, we take advantage of the fact that the SignatureDefFunction's // associated functiondef has lexicographically ordered inputs/outputs due to // nest.flatten. Status LoadSignatureDefFunctionMetadata( const SavedConcreteFunction& saved_concrete_function, SignatureDefFunctionMetadata* out) { std::vector<SignatureDefParam> args;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 09 20:11:48 UTC 2023 - 23.7K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad.cc
auto indices_size = ExpandDims(scope, Size(scope, indices), 0); Output outer_shape, flat_values_shape; if (batch_dims != 0) { auto values_shape = Shape(scope, values); // Add the batch offsets to indices and flatten the batch dimensions. outer_shape = Slice(scope, values_shape, {0}, {batch_dims}); auto inner_shape = Slice(scope, Slice(scope, values_shape, {batch_dims}, {-1}), {1}, {-1});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 31.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
rhs, DenseIntElementsAttr::get( RankedTensorType::get({rhs_rank}, rewriter.getI64Type()), rhs_permutation)); // Reshapes lhs to flatten out_dimensions and contracting_dimensions. llvm::SmallVector<int64_t, 4> lhs_flattened_shape = Concat<int64_t>( lhs_dot_dimensions_info.batch_dimensions().SizesArray(), llvm::ArrayRef<int64_t>{
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
llvm::StringRef equation, RankedTensorType lhs_ty) { llvm::StringRef lhs; llvm::StringRef out; std::tie(lhs, out) = equation.split("->"); if (lhs.empty() || out.empty()) return std::nullopt; // Try to flatten the "..." if possible. int lhs_named_label, rhs_named_label; // following rhs and rhs_ty variables are non-functional here only created to // comply with the existing API llvm::StringRef rhs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/saved_model_utils.cc
const SavedConcreteFunction& saved_concrete_function, const FunctionDef* function_def) { // tf.functions go through many transformations before becoming FunctionDefs // 1. flatten user-provided inputs: // https://github.com/tensorflow/tensorflow/blob/1c064ab76064c58e54261b805027474885a1534d/tensorflow/python/eager/function.py#L2671-L2675 // 2. convert user-provided inputs to tensors:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 12 19:17:46 UTC 2023 - 24K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// Add inline pass. pass_manager.addPass(mlir::createInlinerPass()); // Expands mhlo.tuple ops. pass_manager.addPass( mlir::mhlo::createExpandHloTuplesPass(entry_function_name.str())); // Flatten tuples for control flows. pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::mhlo::createFlattenTuplePass()); mlir::odml::AddMhloOptimizationPasses( pass_manager,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
} // Determine if op commutes with transposes. Requires a strict // definition of Elementwise, all i/o shapes and types must be same-rank // broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) { return false; } auto opr1_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
// Guarantee all functions have one use, which enables more exact shape // inference. pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass()); // Run shape inference so that tf_executor/tf_device ops created later will // likely to inherit more concrete types. pm.addPass(mlir::TF::CreateTFShapeInferencePass()); pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass()); pm.addNestedPass<FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc
for (const int itensor : operations_[ioperation].tensors) { if (const Tensor& tensor = tensors_[itensor]; tensor.first_use() == ioperation /* output */ && tensor.last_use() > peak_loc /* used later */) { max_savings += tensor.size; } } } return max_savings; } std::tuple<Rematerializer::SizeT, Rematerializer::RematSpec>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 14 20:57:44 UTC 2023 - 13.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
// with appropriate shape to match with the shape of XlaDotV2 result. // We didn't apply XlaEinsum or XlaDotV2 for this work, since it would lose // the chance for constant folding later. We could try to add some // postprocessing passes later to further optimize the graph after constant // folding. Value CreateZeroPointPartialOffsetXlaDotV2( OpBuilder &builder, Location loc, Value tensor,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0)