- Sort by: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 108 for input_type (0.15 sec)
-
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
// process. struct ExpressedToQuantizedConverter { // Creates a converter for the given input type. static ExpressedToQuantizedConverter forInputType(Type input_type); // Converts the inputType to be based on the given elemental type, // returning the new type (or nullptr and emit an error on failure). Type convert(quant::QuantizedType elemental_type) const; // Whether the conversion is legal.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
if (dyn_cast_or_null<quantfork::QuantizeCastOp>(next_op)) return failure(); auto input_type = mlir::cast<ShapedType>(transpose_op.getInput().getType()); auto perm_type = mlir::cast<ShapedType>(transpose_op.getPerm().getType()); if (input_type.hasStaticShape() && perm_type.hasStaticShape()) { if (perm_type.getNumElements() != input_type.getRank()) { return transpose_op.emitOpError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
return failure(); } const auto input_type = op.getLhs().getType().cast<TensorType>(); if (!(input_type.getRank() == 2 || input_type.getRank() == 3)) { LLVM_DEBUG(llvm::dbgs() << "Input expected to have rank of 2 or 3. Got: " << input_type << ".\n"); return failure(); } const Value filter = op.getRhs();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
BoolAttr narrow_range = builder.getBoolAttr(false); auto add_quantize_op = [&](Location loc, Type input_type, Block* block, Block::iterator insertion_point, Value arg, int i) { if (auto shaped = mlir::dyn_cast<ShapedType>(input_type)) { if (mlir::isa<FloatType>(shaped.getElementType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
break; default: return nullptr; // Not yet supported } } else { return nullptr; // Not yet supported } input_type = input_type.clone(new_storage_type); return input_type; } // Replaces quant.qcast op to composite quantize_i8 function. class ReplaceQuantizePattern : public mlir::OpRewritePattern<quantfork::QuantizeCastOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type()); SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type, projection_type, layer_norm_scale_type}; auto func_type = builder->getFunctionType(input_types, output_type); auto func = func::FuncOp::create(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
BoolAttr narrow_range = builder.getBoolAttr(false); auto add_quantize_op = [&](Location loc, Type input_type, Block* block, Block::iterator insertion_point, Value arg, int i) { if (auto shaped = mlir::dyn_cast<ShapedType>(input_type)) { if (mlir::isa<FloatType>(shaped.getElementType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type, const TensorType& output_type, bool allow_float, std::string& output_buffer) { return QuantizeModel(model, input_type, output_type, allow_float, /*operator_names=*/{}, TensorType_INT8, output_buffer); } TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
return in_placeholder, output_tensor def _create_simple_tf1_gather_model( self, input_type: dtypes.DType, use_variable_for_filter=False ) -> Tuple[core.Tensor, core.Tensor]: """Creates a basic gather model. This is intended to be used for TF1 (graph mode) tests. Args: input_type: type of the input index tensor for gather operation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
GetBroadcastShapesForBatchMatmul(ShapedType input_type, ShapedType weight_type) { ArrayRef<int64_t> input_shape = input_type.getShape(); ArrayRef<int64_t> weight_shape = weight_type.getShape(); const int64_t num_matmul_dim = 2; const int64_t num_input_batch_dim = input_type.getRank() - num_matmul_dim;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0)