- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 45 for dequantize (0.42 sec)
-
tensorflow/compiler/mlir/lite/tests/split-merged-operands.mlir
// CHECK-DAG: %[[CST_1:.*]] = "tfl.pseudo_const"() <{value = dense<0.000000e+00> : tensor<4x4xf16>}> : () -> tensor<4x4xf16> // CHECK-DAG: %[[DQ_0:.*]] = "tfl.dequantize"(%[[CST_0]]) : (tensor<4x4xf16>) -> tensor<4x4xf32> // CHECK-DAG: %[[DQ_1:.*]] = "tfl.dequantize"(%[[CST_1]]) : (tensor<4x4xf16>) -> tensor<4x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported. bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) { if (storage_type.getWidth() == 8 || (storage_type.isSigned() && storage_type.getWidth() == 16)) { return true; } LLVM_DEBUG(llvm::dbgs() << "Uniform quantize / dequantize op only supports ui8, i8 or "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
mlir::cast<ShapedType>(call_op.getResult(0).getType()) .clone(rewriter.getF32Type())); rewriter.setInsertionPoint(call_op); rewriter.insert(new_call_op); // Remove the dequantize ops and replace uses by the new func.call op. SmallVector<Operation*> users_to_erase; for (auto user : users) { llvm::dyn_cast<mlir::stablehlo::UniformDequantizeOp>(user)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc
// Rewire the inputs. op->setOperand(index, duplicated_input_op->getResult(0)); } else if (auto dq = dyn_cast<DequantizeOp>(input_op); dq && matchPattern(dq.getInput(), m_Constant(&attr))) { // Constant -> Dequantize case. builder->setInsertionPoint(op); Operation* duplicated_input_op = builder->clone(*dq.getInput().getDefiningOp());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir
// CHECK-NO-UNPACK: %[[DEQUANTIZE:.+]] = stablehlo.uniform_dequantize %[[QUANTIZE_1]] : (tensor<1x3x!quant.uniform<i8:f32, {{.*}}>>) -> tensor<1x3xf32> // CHECK-NO-UNPACK: return %[[DEQUANTIZE]] : tensor<1x3xf32> // ----- // Tests that a simple dot_general without CustomAggregators is not quantized.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc
if (!read_variable_op) continue; // Add dequantize. builder.setInsertionPointAfter(read_variable_op); auto new_read_variable_op = builder.create<ReadVariableOp>(read_variable_op.getLoc(), ref_qtype, read_variable_op.getResourceId()); auto new_dq_op = builder.create<DequantizeOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td
* A tensor is dequantized using a `func::FuncOp` whose name contains "uniform_dequantize". The first argument is the tensor to be quantized, the second argument is the zero point constant (element type: int) and the third argument is the inverse scale constant (element type: float). * Inputs to the target quantized op is quantized and the outputs are dequantized.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
op->erase(); }); } // Fold quantized i32 (normally bias) into their float values. struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> { using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern; LogicalResult matchAndRewrite(TFL::DequantizeOp dequant_op, PatternRewriter& rewriter) const override { // We only fold i32 -> float pattern.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// whose storage type is 32-bit integer and expressed type is f32. bool IsI32F32UniformQuantizedPerAxisType(Type type); // Determines whether the storage type of a quantized type is supported by // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported. bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type); // Returns true if a type is quantized tensor type. bool IsQuantizedTensorType(Type type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
// have side effects e.g. reduced flatbuffer size. Only certain type // conversions are supported. bool reduce_type_precision = false; // Whether to consider this model a quantized model with quantize/dequantize // ops and to convert kernels to quantized kernels wherever appropriate. quant::QDQConversionMode qdq_conversion_mode = quant::QDQConversionMode::kQDQNone;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes - Viewed (0)