- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 24 for _input_shapes (0.21 sec)
-
tensorflow/cc/gradients/grad_helper.h
// Helper function for reduction ops. // // input_shape: 1-D Tensor, the shape of the Tensor being reduced. // axes: 1-D Tensor, the reduction axes. // Note that the reduction indices are in the range // -rank(input_shape), rank(input_shape) // returns a 1-D Tensor, the output shape as if keep_dims were set to True. Output ReducedShapeHelper(const Scope& scope, const Output& input_shape, const Output& reduction_axes);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc
} void ConvertToMlirShape(const TensorShape& input_shape, llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dims()); for (const auto& d : input_shape) { shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size); } } Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
ArrayRef<int64_t> input_shape = input_type.getShape(); if (reshape_shape.size() > input_shape.size()) return failure(); // Extend the input shape with leading 1s to match the broadcast shape. ArrayRef<int64_t> broadcast_shape = output_type.getShape(); SmallVector<int64_t, 4> input_shape_extended; input_shape_extended.append(broadcast_shape.size() - input_shape.size(), 1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/stablehlo_quantizer_odml_oss.ipynb
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 12 03:40:43 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h
// InferShapeForFunction. FailureOr<bool> InferModuleShape(ModuleOp module, int64_t max_iterations = 10, ArrayRef<TypeID> ops_to_skip = {}, ArrayRef<ArrayRef<int64_t>> input_shapes = {}); // Given a tensorflow NodeShape string, returns a vector of argument shapes // that can be used with InferShapeForFunction. // TF NodeShape uses `,` to separate dimensions, and `:` to separate arguments.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type.h
// Converts an TensorFlow shape to the one used in MLIR. void ConvertToMlirShape(const TensorShape& input_shape, llvm::SmallVectorImpl<int64_t>* shape); // Converts an TensorFlow shape proto to the one used in MLIR. Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.cc
{{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("b", "Placeholder", {}, {{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("c", "Placeholder", {}, {{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("m", "TestFn", {"a", "b", "c"}, {}); return graph; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
return func; } func::FuncOp createMaxUnpoolingFunc( mlir::Builder* builder, const SmallVector<int64_t, 4>& input_shape, const SmallVector<int64_t, 4>& output_shape) { auto input_type = RankedTensorType::get(input_shape, builder->getF32Type()); auto indices_type = RankedTensorType::get(input_shape, builder->getI64Type()); auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 7.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.h
// Please see the implementation file for documentation of these options. // Import options. extern llvm::cl::opt<std::string> input_arrays; extern llvm::cl::opt<std::string> input_dtypes; extern llvm::cl::opt<std::string> input_shapes; extern llvm::cl::opt<std::string> output_arrays; extern llvm::cl::opt<std::string> control_output_arrays; extern llvm::cl::opt<std::string> inference_type; extern llvm::cl::opt<std::string> min_values;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 10 20:59:50 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
enable_soft_placement, set_original_tf_func_name}; auto module_or = tensorflow::GraphdefToMlirTranslateFunction( input, input_arrays, input_dtypes, input_shapes, output_arrays, control_output_arrays, options, context); if (!module_or.status().ok()) return nullptr; return std::move(module_or).value(); } static TranslateToMLIRRegistration GraphdefToMlirTranslate(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes - Viewed (0)