- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 27 for Platen (0.21 sec)
-
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// Add inline pass. pass_manager.addPass(mlir::createInlinerPass()); // Expands mhlo.tuple ops. pass_manager.addPass( mlir::mhlo::createExpandHloTuplesPass(entry_function_name.str())); // Flatten tuples for control flows. pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::mhlo::createFlattenTuplePass()); mlir::odml::AddMhloOptimizationPasses( pass_manager,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
rhs, DenseIntElementsAttr::get( RankedTensorType::get({rhs_rank}, rewriter.getI64Type()), rhs_permutation)); // Reshapes lhs to flatten out_dimensions and contracting_dimensions. llvm::SmallVector<int64_t, 4> lhs_flattened_shape = Concat<int64_t>( lhs_dot_dimensions_info.batch_dimensions().SizesArray(), llvm::ArrayRef<int64_t>{
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/c/kernels/tensor_shape_utils.h
#define TENSORFLOW_C_KERNELS_TENSOR_SHAPE_UTILS_H_ #include <string> #include "tensorflow/c/tf_tensor.h" namespace tensorflow { // The following are utils for the shape of a TF_Tensor type. // These functions may later be subsumed by the methods for a // TF_TensorShape type. // Returns a string representation of the TF_Tensor shape. std::string ShapeDebugString(TF_Tensor* tensor); } // namespace tensorflow
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 31 00:34:05 UTC 2022 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
} // Determine if op commutes with transposes. Requires a strict // definition of Elementwise, all i/o shapes and types must be same-rank // broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) { return false; } auto opr1_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc
// Guarantee all functions have one use, which enables more exact shape // inference. pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass()); // Run shape inference so that tf_executor/tf_device ops created later will // likely to inherit more concrete types. pm.addPass(mlir::TF::CreateTFShapeInferencePass()); pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass()); pm.addNestedPass<FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 16:09:14 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.h
// -------------------------------------------------------------------------- // // Clustering policy specifies if the operation can be clustered (in practice it // usually means that operation can be added to a cluster that will be later // compiled) given the set of constraints on its results, and might propagate or // create new constraints on the operation operands. // // Clustering policy must make a local decision just for a single operation. It
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 03:47:00 UTC 2023 - 12.1K bytes - Viewed (0) -
tensorflow/c/eager/unified_api_testutil.cc
// Returning null tensors from a tf.function is not supported, so we keep // track of which indices in the model's outputs are nullptr in this set. // The FunctionDef only outputs the non-null tensors. We later pad the // function op outputs to have nullptrs at the `null_indices`. absl::flat_hash_set<int> null_indices; { AbstractContextPtr func_ctx(BuildFunction(fn_name));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 27 13:57:45 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/jit/cluster_scoping_pass.cc
// Node_Y will receive both scopes "unstage" and "stage", while Node_X receives // only scope "stage". The semantic of scope "unstage" is preserved although // scope "stage" is later appended. As a result, Node_X and Node_Y will be put // into different clusters. // // Unstage -> Node_Y (scope "unstage & stage") // | // V
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h
// Generate a Max and an ArgMax, as the mhlo op returns both while in TF // we have separate ops for them. If only one of them is used then the other // one will be garbage collected later. if (!mlir::isa<ShapedType>(operand.getType())) return failure(); auto operand_type = mlir::cast<ShapedType>(operand.getType()); if (operand_type.getElementType().isInteger(1)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc
return false; } // If any of the result types are variants, don't try to constant fold them. // This creates opaque variant constants which lose information and would // require "raising" later. for (const Type type : inst->getResultTypes()) { if (const TensorType tensor_type = mlir::dyn_cast<TensorType>(type)) { if (mlir::isa<VariantType>(tensor_type.getElementType())) { return false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.3K bytes - Viewed (0)