- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 108 for input_type (0.16 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc
output_shape[1] = composite_result_shape[2]; output_shape[2] = composite_result_shape[3]; output_shape[3] = composite_result_shape[1]; auto input_type = mlir::cast<ShapedType>(old_op->getOperand(0).getType()); return RankedTensorType::get(output_shape, input_type.getElementType()); } } // namespace odml
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc
OP_REQUIRES(context, context->input_type(i * 3) == DT_FLOAT, absl::AbortedError("The input `min` must have float type.")); OP_REQUIRES(context, context->input_type(i * 3 + 1) == DT_FLOAT, absl::AbortedError("The input `max` must have float type.")); OP_REQUIRES( context, context->input_type(i * 3 + 2) == DT_INT64,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
LogicalResult CheckFusableLayerNormalizedLstmCellSimple( func::FuncOp lstm_func) { for (int i = 0; i < 5; ++i) { auto input = lstm_func.getArgument(i); auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType()); if (!input_type) { lstm_func.emitWarning( "we cannot fuse this lstm func because all the inputs have not " "ranked tensor type."); return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
<< n_init_values << ")"; } auto input_ty_0 = inputs_ty[0].cast<ShapedType>(); if (input_ty_0.hasStaticShape()) { for (int i = 1; i < n_inputs; ++i) { auto input_ty_i = inputs_ty[i].cast<ShapedType>(); if (input_ty_i.hasStaticShape() && input_ty_i.getShape() != input_ty_0.getShape()) { return op.emitOpError()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// Quantizes the input model represented as `model_buffer` and writes the result // to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a // valid FlatBuffer format for Model supported by TFLite. // // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 / // int8 / int16. // // Returns a partially quantized model if `fully_quantize` is false. Returns a // non-OK status if the quantization fails. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
Operation** result) { auto input_type = mlir::cast<RankedTensorType>(input.getType()); SmallVector<int64_t, 4> output_shape; int size_of_splits; if (input_type.getRank() < axis || axis < 0) return failure(); for (int i = 0; i < input_type.getRank(); ++i) { int64_t dim = input_type.getDimSize(i); if (i == axis) { if (dim % splits != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
<< ", input_inference_type: " << tflite::EnumNameTensorType(input_type) << ", output_inference_type: " << tflite::EnumNameTensorType(output_type) << "\n"; mlir::Builder mlir_builder(&context); mlir::Type input_mlir_type = tflite::ConvertElementType(input_type, mlir_builder); mlir::Type output_mlir_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
llvm::SmallVector<int64_t, 4> input_shape(4, ShapedType::kDynamic); auto input_type = mlir::cast<TensorType>(op.getInput().getType()); if (input_type.hasRank()) { if (input_type.getRank() != 4) return op.emitOpError() << "requires input to be a 4D tensor, but got " << input_type; int64_t input_batch = input_type.getDimSize(0); if (input_batch != ShapedType::kDynamic &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
if (!reshape_type.hasStaticShape()) return failure(); ArrayRef<int64_t> reshape_shape = reshape_type.getShape(); auto input_type = mlir::cast<ShapedType>(op.getInput().getType()); auto output_type = mlir::cast<ShapedType>(op.getOutput().getType()); if (!input_type.hasRank() || !output_type.hasRank()) return failure(); // The pattern attempts to reduce the rank of the input to BroadcastTo.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
return failure(); Value input = tf_op.getInput(); RankedTensorType input_type = mlir::dyn_cast<RankedTensorType>(input.getType()); // Only rank size four input will be only available by the tf.Conv2D // operator verification. if (!input_type || input_type.isDynamicDim(3)) { return failure(); } // Check if the given op is based on grouped convolution.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0)