- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 62 for input_type (0.16 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
if (dyn_cast_or_null<quantfork::QuantizeCastOp>(next_op)) return failure(); auto input_type = mlir::cast<ShapedType>(transpose_op.getInput().getType()); auto perm_type = mlir::cast<ShapedType>(transpose_op.getPerm().getType()); if (input_type.hasStaticShape() && perm_type.hasStaticShape()) { if (perm_type.getNumElements() != input_type.getRank()) { return transpose_op.emitOpError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
BoolAttr narrow_range = builder.getBoolAttr(false); auto add_quantize_op = [&](Location loc, Type input_type, Block* block, Block::iterator insertion_point, Value arg, int i) { if (auto shaped = mlir::dyn_cast<ShapedType>(input_type)) { if (mlir::isa<FloatType>(shaped.getElementType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type()); SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type, projection_type, layer_norm_scale_type}; auto func_type = builder->getFunctionType(input_types, output_type); auto func = func::FuncOp::create(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
BoolAttr narrow_range = builder.getBoolAttr(false); auto add_quantize_op = [&](Location loc, Type input_type, Block* block, Block::iterator insertion_point, Value arg, int i) { if (auto shaped = mlir::dyn_cast<ShapedType>(input_type)) { if (mlir::isa<FloatType>(shaped.getElementType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
GetBroadcastShapesForBatchMatmul(ShapedType input_type, ShapedType weight_type) { ArrayRef<int64_t> input_shape = input_type.getShape(); ArrayRef<int64_t> weight_shape = weight_type.getShape(); const int64_t num_matmul_dim = 2; const int64_t num_input_batch_dim = input_type.getRank() - num_matmul_dim;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
mlir::OpBuilder builder(func.getBody()); auto& bb = func.front(); auto loc = func.getLoc(); int num_args = bb.getNumArguments(); llvm::SmallVector<Type, 4> input_types; input_types.reserve(num_args); // Edit the block arguments and create the new input ops in place to replace // the old input ops and quantize ops. for (int i = 0; i != num_args; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/cc/framework/ops.h
class Operation { public: Operation() : node_(nullptr) {} explicit Operation(Node* n); int32 num_inputs() const { return node_->num_inputs(); } DataType input_type(int32_t o) const { return node_->input_type(o); } Output input(int32_t i) const; int32 num_outputs() const { return node_->num_outputs(); } DataType output_type(int32_t o) const { return node_->output_type(o); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
return attr_enforced_quantizable || trait_enforced_quantizable; } // Returns the quantized type for the // input_type/min/max/storag_type_width/narrow_range. // This is entry point to the Quant dialect and used for both quantizing // activations and weights. Type GetQuantizedType(Builder builder, const Type input_type, const ArrayRef<double> min, const ArrayRef<double> max,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.cc
const tflite::TensorType input_type = FromTocoDataTypeToTflitToTensorType(input_data_type); const tflite::TensorType output_type = FromTocoDataTypeToTflitToTensorType(output_data_type); std::string output_model; const absl::string_view input_model_buffer(buf, length); auto status = mlir::lite::QuantizeModel( input_model_buffer, input_type, output_type, inference_tensor_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// Create a tfl.transpose op that performs ZX transpose on `input`. auto create_z_x_transpose_op = [&](Value input) -> Value { RankedTensorType input_type = mlir::cast<RankedTensorType>(input.getType()); const int input_rank = input_type.getRank(); // Create a 1D I32 tensor for representing the dimension permutation. auto permuation_tensor_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0)