Results 1 - 10 of 34 for quantize (0.19 sec)
- tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

  ```mlir
  // MixedPrecision-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0)
  // MixedPrecision-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
  // MixedPrecision-NEXT: %[[q_0:.*]] = "tfl.quantize"(%arg1)
  // MixedPrecision-NEXT: %[[dq_0:.*]] = "tfl.dequantize"(%[[q_0]])
  // MixedPrecision-NEXT: %[[c:.*]] = "tfl.concatenation"(%[[dq]], %[[dq_0]])
  // MixedPrecision-NEXT: %[[q_1:.*]] = "tfl.quantize"(%[[c]])
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes
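  The FileCheck lines above expect a quantize/dequantize pair around each concatenation operand. A minimal C++ sketch (hypothetical scale, zero point of 0, not taken from the test) of what such a pair computes: the value is rounded to the nearest representable quantized level but stays in float, so the quantization error becomes observable to later passes.

  ```cpp
  #include <cmath>
  #include <iostream>

  // Quantize followed by dequantize with the same scale: a float-in,
  // float-out rounding onto the quantized grid ("fake quantization").
  float QuantizeDequantize(float x, float scale) {
    return std::round(x / scale) * scale;
  }

  int main() {
    // With scale 0.05, 0.123 snaps to the nearest multiple of 0.05.
    std::cout << QuantizeDequantize(0.123f, 0.05f) << "\n";  // prints 0.1
  }
  ```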
- tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

  ```mlir
  // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true" -cse | FileCheck %s
  // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true legacy-float-scale=true" -cse | FileCheck --check-prefix=Legacy %s

  // CHECK-LABEL: QuantizeLstmCellInput
  func.func @QuantizeLstmCellInput(%arg0: tensor<1x28x28xf32>) -> tensor<1x28x20xf32> {
    %cst_2 = "tfl.no_value"() {value = unit} : () -> none
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 52.6K bytes
- tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

  ```cpp
          /*value=*/filter_i8_value_attr);

    // Replace filter uses with uniform quantized filter.
    rewriter.replaceAllUsesWith(filter_op->getResult(0),
                                quantized_filter_constant_op.getResult());

    // Replace conv op with a new convolution op that has quantized output type.
    // Quantize -> Dequantize following r3.
    auto output_uniform_quantize_call_op = cast<func::CallOp>(
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes
- tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

  ```cpp
    lines.push_back(absl::StrFormat(
        "Number of quantized layers with quantized outputs: %d/%d",
        total_quantized_func_count - float_output_func_count,
        total_quantized_func_count));
    lines.push_back(absl::StrFormat("Number of quantize layers added: %d",
                                    quantize_func_count));
    lines.push_back(absl::StrFormat("Number of dequantize layers added: %d",
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes
- tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

  ```mlir
  ^bb0(%arg0: tensor<1x2xf32>):
    %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32>
    %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>} : (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>)
    %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
    %2 = "tf.Transpose"(%1, %cst_0) : (tensor<1x2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes
- tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

  ```cpp
                Eq(TensorType_INT8));

    // Verify FC bias should be int32 quantized.
    ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
                Eq(TensorType_FLOAT32));
    EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
                Eq(TensorType_INT32));

    // The output tensor of FC should be int8 quantized.
    ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes
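  The test above checks that a fully connected layer's bias becomes int32 while its activations and weights are int8. A minimal sketch (hypothetical scales, not from the test) of the usual reasoning: the int32 accumulator holds sums of int8×int8 products at scale input_scale × weight_scale, so the bias must be quantized at that same scale to be addable, and at such a small scale its values overflow int8.

  ```cpp
  #include <cmath>
  #include <cstdint>
  #include <iostream>

  int main() {
    const float input_scale = 0.05f;    // hypothetical int8 input scale
    const float weight_scale = 0.002f;  // hypothetical int8 weight scale

    // The bias shares the accumulator's scale: input_scale * weight_scale.
    const float bias_scale = input_scale * weight_scale;  // 0.0001

    const float bias_value = 0.37f;  // one float bias element
    const auto q_bias =
        static_cast<int32_t>(std::lround(bias_value / bias_scale));
    std::cout << q_bias << "\n";  // 3700 -- well outside int8's range
  }
  ```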
- tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

  ```mlir
  // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions -quant-quantize-composite-functions | FileCheck --check-prefix=TF %s
  // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions -quant-quantize-composite-functions='target-opset=XLA' | FileCheck --check-prefix=XLA %s
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 80.5K bytes
- tensorflow/compiler/mlir/lite/flatbuffer_import.cc

  ```cpp
      return emitError(loc, type_or_err.status().ToString()),
             type_or_err.status();
    }
    auto type = std::move(type_or_err).value();

    if (op_name == "tfl.quantize") {
      // Special case for quantize: return type must also be in qtype attribute
      op_state.addAttribute("qtype", mlir::TypeAttr::get(type));
    } else if (op_name == "tfl.reshape" && op_state.operands.size() == 1) {
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes
- tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

  ```cpp
        .Default([](Type) {
          llvm_unreachable("QuantizedTypeIsUnsigned: not a quantized type");
          return false;
        });
  }

  // Return the half_range value that is used by DequantizeOp. half_range is used
  // to offset the quantized representation before it gets scaled. In the case
  // of negative quantize types, this offset is half the type's range.
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes
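  The comment above describes the half_range offset applied when lowering DequantizeOp. A minimal numeric sketch (assuming MIN_COMBINED-style semantics as in tf.Dequantize; the ranges are illustrative): for a signed 8-bit type the offset is half of its 256-value range, i.e. 128, which shifts the representation into [0, 255] before it is scaled into [min_range, max_range].

  ```cpp
  #include <cstdint>
  #include <iostream>

  // Dequantize a signed 8-bit value using the half_range offset
  // described in the comment above.
  float Dequantize(int8_t q, float min_range, float max_range) {
    const float half_range = 128.0f;  // half of int8's 256-value range
    const float scale = (max_range - min_range) / 255.0f;
    return (static_cast<float>(q) + half_range) * scale + min_range;
  }

  int main() {
    // With range [-1, 1]: -128 maps to -1.0 and 127 maps to 1.0.
    std::cout << Dequantize(-128, -1.0f, 1.0f) << " "
              << Dequantize(127, -1.0f, 1.0f) << "\n";
  }
  ```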
- tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

  ```tablegen
  foreach BinaryOp = [TFL_DivOp, TFL_MulOp]<Op> in
    defm : FuseMulOrDivWithConv2dOrDepthwiseConv2d<BinaryOp>;

  // This pattern applies when the same quantize/dequantize have been used twice
  // with the same scale. We want to remove the redundancy.
  // TODO(fengliuai): move this to the sanity check of pre-quantize pass.
  def eliminate_dq_q_pairs : Pat<
    (TFL_QuantizeOp (TFL_DequantizeOp $in), $qt),
    (replaceWithValue $in),
  ```

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes
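  The eliminate_dq_q_pairs pattern above drops a quantize that immediately follows a dequantize with the same quantized type. A minimal C++ sketch (hypothetical affine quantize/dequantize helpers, not TFLite's implementation) of why the pair is redundant: when scale and zero point match, quantize(dequantize(q)) returns q for every representable value, so the pair can be replaced by its input.

  ```cpp
  #include <cassert>
  #include <cmath>
  #include <cstdint>

  int8_t Quantize(float x, float scale, int32_t zero_point) {
    return static_cast<int8_t>(std::lround(x / scale) + zero_point);
  }

  float Dequantize(int8_t q, float scale, int32_t zero_point) {
    return (q - zero_point) * scale;
  }

  int main() {
    const float scale = 0.1f;
    const int32_t zero_point = 3;
    // Round-tripping any in-range value is the identity, which is
    // exactly what lets the pattern forward $in past the q/dq pair.
    for (int q = -125; q <= 124; ++q) {
      const auto v = static_cast<int8_t>(q);
      assert(Quantize(Dequantize(v, scale, zero_point), scale, zero_point) == v);
    }
  }
  ```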