- Sort by: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 139 for input_type (0.2 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h
// applying the permutation to a given shape through a transpose. PermutationAndShape GetPermutationAndTransposedShape( llvm::ArrayRef<int64_t> permutation_array, ShapedType input_type, ConversionPatternRewriter& rewriter); // Create a single const integer. Value BuildIntConstOp(ImplicitLocOpBuilder& builder, ConversionPatternRewriter& rewriter, int64_t const_value,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Nov 08 11:35:25 UTC 2023 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type()); SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type, projection_type, layer_norm_scale_type}; auto func_type = builder->getFunctionType(input_types, output_type); auto func = func::FuncOp::create(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
BoolAttr narrow_range = builder.getBoolAttr(false); auto add_quantize_op = [&](Location loc, Type input_type, Block* block, Block::iterator insertion_point, Value arg, int i) { if (auto shaped = mlir::dyn_cast<ShapedType>(input_type)) { if (mlir::isa<FloatType>(shaped.getElementType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type, const TensorType& output_type, bool allow_float, std::string& output_buffer) { return QuantizeModel(model, input_type, output_type, allow_float, /*operator_names=*/{}, TensorType_INT8, output_buffer); } TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
return in_placeholder, output_tensor def _create_simple_tf1_gather_model( self, input_type: dtypes.DType, use_variable_for_filter=False ) -> Tuple[core.Tensor, core.Tensor]: """Creates a basic gather model. This is intended to be used for TF1 (graph mode) tests. Args: input_type: type of the input index tensor for gather operation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
GetBroadcastShapesForBatchMatmul(ShapedType input_type, ShapedType weight_type) { ArrayRef<int64_t> input_shape = input_type.getShape(); ArrayRef<int64_t> weight_shape = weight_type.getShape(); const int64_t num_matmul_dim = 2; const int64_t num_input_batch_dim = input_type.getRank() - num_matmul_dim;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
@test_util.run_in_graph_and_eager_modes def test_qat_gather_and_conv_model( self, ): input_type = dtypes.int32 model = self._create_simple_gather_and_conv_model( input_type, filter_shape=(2, 3, 3, 1024), is_qat_model=True, ) saved_model_save.save(model, self._input_saved_model_path)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
// as its an input requirement. if (!input_ty.hasRank() || input_ty.getRank() != 4) { return failure(); } int64_t batch_cst = input_ty.getShape()[0]; int64_t channels_cst = input_ty.getShape()[3]; int64_t in_y_cst = input_ty.getShape()[1]; int64_t in_x_cst = input_ty.getShape()[2]; int64_t in_spatial_cst =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
mlir::OpBuilder builder(func.getBody()); auto& bb = func.front(); auto loc = func.getLoc(); int num_args = bb.getNumArguments(); llvm::SmallVector<Type, 4> input_types; input_types.reserve(num_args); // Edit the block arguments and create the new input ops in place to replace // the old input ops and quantize ops. for (int i = 0; i != num_args; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
static bool AreInputDimensionsOneInAxes(Value input, const mlir::Attribute &axes) { RankedTensorType input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType()); if (!input_type) return false; auto type_shape = input_type.getShape(); DenseIntElementsAttr axes_attr = mlir::dyn_cast_or_null<DenseIntElementsAttr>(axes); if (!axes_attr) return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0)