- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 100 for input_dtype (0.22 sec)
-
tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
const llvm::SmallVectorImpl<Attribute>& input_types, llvm::SmallVectorImpl<Value>& input_values) const { if (input_types.size() <= 1) return; Type target_input_type = mlir::cast<TypeAttr>(input_types[0]).getValue(); auto result_type = UnrankedTensorType::get(target_input_type); for (auto i = 1; i < input_types.size(); ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
// accumulation over the given input type. Type GetSumAccumulationType(Type input_type) { MLIRContext *ctx = input_type.getContext(); if (input_type.isBF16() || input_type.isF16()) return FloatType::getF32(ctx); if (input_type.isSignlessInteger(8) || input_type.isSignlessInteger(16)) return IntegerType::get(ctx, 32); return input_type; } // Returns axis in HLO format from TF elements attr with exactly one element or
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc
OP_REQUIRES(context, context->input_type(i * 3) == DT_FLOAT, absl::AbortedError("The input `min` must have float type.")); OP_REQUIRES(context, context->input_type(i * 3 + 1) == DT_FLOAT, absl::AbortedError("The input `max` must have float type.")); OP_REQUIRES( context, context->input_type(i * 3 + 2) == DT_INT64,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
LogicalResult CheckFusableLayerNormalizedLstmCellSimple( func::FuncOp lstm_func) { for (int i = 0; i < 5; ++i) { auto input = lstm_func.getArgument(i); auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType()); if (!input_type) { lstm_func.emitWarning( "we cannot fuse this lstm func because all the inputs have not " "ranked tensor type."); return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc
for (int index : dynamic_shape_arg_index) { BlockArgument arg = func.getArgument(index); auto inputType = mlir::dyn_cast<RankedTensorType>(arg.getType()); // Only rank 1 tensor is supported for now. if (!inputType || inputType.getRank() != 1) continue; auto shape = llvm::to_vector<4>(inputType.getShape()); llvm::SmallVector<int64_t, 4> bounds(shape.begin(), shape.end()); // Mark the dim as dynamic dim.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
if (!module_op) return mlir::failure(); llvm::SmallVector<XlaArgument, 4> xla_arguments; auto args_status = ParseXlaArguments( mlir::StringRefToView(input_shapes), mlir::StringRefToView(input_dtypes), mlir::StringRefToView(input_types), xla_arguments); if (!args_status.ok()) { LOG(ERROR) << args_status; return mlir::failure(); } XlaCompilationResult compilation_result; auto compilation_status =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// Quantizes the input model represented as `model_buffer` and writes the result // to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a // valid FlatBuffer format for Model supported by TFLite. // // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 / // int8 / int16. // // Returns a partially quantized model if `fully_quantize` is false. Returns a // non-OK status if the quantization fails. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
<< ", input_inference_type: " << tflite::EnumNameTensorType(input_type) << ", output_inference_type: " << tflite::EnumNameTensorType(output_type) << "\n"; mlir::Builder mlir_builder(&context); mlir::Type input_mlir_type = tflite::ConvertElementType(input_type, mlir_builder); mlir::Type output_mlir_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
subprojects/core/src/main/java/org/gradle/api/internal/initialization/transform/ExternalDependencyInstrumentingArtifactTransform.java
File input = getInput().get().getAsFile(); InstrumentationInputType inputType = getInputType(input); switch (inputType) { case DEPENDENCY_ANALYSIS_DATA: doOutputTransformedFile(input, outputs); return; case ORIGINAL_ARTIFACT:
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 18 15:08:33 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
return failure(); Value input = tf_op.getInput(); RankedTensorType input_type = mlir::dyn_cast<RankedTensorType>(input.getType()); // Only rank size four input will be only available by the tf.Conv2D // operator verification. if (!input_type || input_type.isDynamicDim(3)) { return failure(); } // Check if the given op is based on grouped convolution.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0)