Results 51 - 60 of 196 for "dequantize"
tensorflow/compiler/mlir/lite/quantization/ir/Passes.td
tensorflow/compiler/mlir/lite/tests/split-merged-operands.mlir
// CHECK-DAG: %[[CST_1:.*]] = "tfl.pseudo_const"() <{value = dense<0.000000e+00> : tensor<4x4xf16>}> : () -> tensor<4x4xf16>
// CHECK-DAG: %[[DQ_0:.*]] = "tfl.dequantize"(%[[CST_0]]) : (tensor<4x4xf16>) -> tensor<4x4xf32>
// CHECK-DAG: %[[DQ_1:.*]] = "tfl.dequantize"(%[[CST_1]]) : (tensor<4x4xf16>) -> tensor<4x4xf32>
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// TODO: b/323478683 - Make the attribute part of the op definition.
quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());
// `original_result` is used by `quantize`, so this will also replace that use
// with the result of `dequantize`. Remember to reset that use afterwards.
value.replaceAllUsesWith(dequantize);
quantize.getOperation()->replaceUsesOfWith(dequantize, value);
}
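The order of the two calls at the end of this snippet is easy to misread. Below is a self-contained toy sketch of that rewiring order; `Node` and the two helpers are illustrative stand-ins for MLIR's Value/Operation APIs (`Value::replaceAllUsesWith`, `Operation::replaceUsesOfWith`), not the MLIR types themselves.

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy IR node: `operands` are the values this op reads.
    struct Node {
      std::string name;
      std::vector<Node*> operands;
    };

    // Step 1 analogue: redirect *every* reader of `from` to `to`.
    void ReplaceAllUsesWith(Node* from, Node* to, std::vector<Node*>& ops) {
      for (Node* op : ops)
        for (Node*& operand : op->operands)
          if (operand == from) operand = to;
    }

    // Step 2 analogue: fix up a single op's operands.
    void ReplaceUsesOfWith(Node* op, Node* from, Node* to) {
      for (Node*& operand : op->operands)
        if (operand == from) operand = to;
    }

    int main() {
      Node value{"value", {}};
      Node user{"user", {&value}};          // pre-existing consumer of `value`
      Node quantize{"quantize", {&value}};  // freshly inserted q/dq pair
      Node dequantize{"dequantize", {&quantize}};
      std::vector<Node*> ops{&user, &quantize, &dequantize};

      // Step 1 also rewires `quantize` itself, creating the cycle
      // quantize -> dequantize -> quantize; step 2 breaks that cycle while
      // leaving the original user reading the dequantized result.
      ReplaceAllUsesWith(&value, &dequantize, ops);
      ReplaceUsesOfWith(&quantize, &dequantize, &value);

      std::cout << "user reads: " << user.operands[0]->name << '\n';      // dequantize
      std::cout << "quantize reads: " << quantize.operands[0]->name << '\n';  // value
    }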
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
    mlir::cast<ShapedType>(call_op.getResult(0).getType())
        .clone(rewriter.getF32Type()));
rewriter.setInsertionPoint(call_op);
rewriter.insert(new_call_op);
// Remove the dequantize ops and replace uses by the new func.call op.
SmallVector<Operation*> users_to_erase;
for (auto user : users) {
  llvm::dyn_cast<mlir::stablehlo::UniformDequantizeOp>(user)
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
  if (storage_type.getWidth() == 8 ||
      (storage_type.isSigned() && storage_type.getWidth() == 16)) {
    return true;
  }
  LLVM_DEBUG(llvm::dbgs()
             << "Uniform quantize / dequantize op only supports ui8, i8 or "
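The predicate above is small enough to restate standalone: 8-bit storage is accepted regardless of signedness, while 16-bit storage must be signed (so ui8, i8, i16 pass and ui16 fails). A minimal sketch with MLIR's IntegerType replaced by plain (width, is_signed) parameters; the function name and driver are illustrative, not part of the TensorFlow code.

    #include <iostream>

    // Mirrors the width/signedness check in
    // IsSupportedByTfliteQuantizeOrDequantizeOps above.
    bool IsSupportedStorageType(unsigned width, bool is_signed) {
      return width == 8 || (is_signed && width == 16);
    }

    int main() {
      std::cout << IsSupportedStorageType(8, false)    // ui8  -> 1
                << IsSupportedStorageType(8, true)     // i8   -> 1
                << IsSupportedStorageType(16, true)    // i16  -> 1
                << IsSupportedStorageType(16, false)   // ui16 -> 0
                << '\n';
    }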
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
  } : (tensor<*xf32>, tensor<*xf32>, tensor<*xi32>) -> tensor<*x!tf_type.qint32>
  func.return %quantize : tensor<*x!tf_type.qint32>
}

// Dequantize final graph output back to f32. Input is qint8.
func.func @dequantize_i8(%input : tensor<*x!tf_type.qint8>,
                         %input_scale : tensor<*xf32>,
                         %input_zp : tensor<*xi32>) -> tensor<*xf32> {
  %dequantize = "tf.UniformDequantize"(%input, %input_scale, %input_zp) {
    Tin = "tfdtype$DT_QINT8",
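For context, uniform dequantization is the affine mapping f = (q - zero_point) * scale applied element-wise, which is what the `tf.UniformDequantize` call above computes per element. A minimal standalone sketch of that arithmetic; the function name and the example scale/zero-point values are made up for illustration.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Element-wise affine dequantization: f = (q - zero_point) * scale.
    std::vector<float> DequantizeI8(const std::vector<int8_t>& q,
                                    float scale, int32_t zero_point) {
      std::vector<float> out;
      out.reserve(q.size());
      for (int8_t v : q)
        out.push_back((static_cast<int32_t>(v) - zero_point) * scale);
      return out;
    }

    int main() {
      // With scale 0.5 and zero point -1, {-128, 0, 127} maps back to
      // {-63.5, 0.5, 64.0}.
      for (float f : DequantizeI8({-128, 0, 127}, 0.5f, -1))
        std::cout << f << '\n';
    }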
tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc
  // Rewire the inputs.
  op->setOperand(index, duplicated_input_op->getResult(0));
} else if (auto dq = dyn_cast<DequantizeOp>(input_op);
           dq && matchPattern(dq.getInput(), m_Constant(&attr))) {
  // Constant -> Dequantize case.
  builder->setInsertionPoint(op);
  Operation* duplicated_input_op =
      builder->clone(*dq.getInput().getDefiningOp());
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
if (failed(candidate_ops) || candidate_ops->empty()) return failure();

// Rewrite the floating-point ops to the quantized version, by fusing
// preceding dequantize ops and succeeding quantize ops.
for (Operation* candidate_op : *candidate_ops) {
  // If it is a requantize op, we shouldn't rewrite this op.
  if (isa<QuantizeOpT, DequantizeOpT>(candidate_op)) {
    return failure();
  }
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
  quant::QuantizationSpecs quant_specs_;
};

#include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"

// If the weight is applicable to dynamic range quantization, insert Quantize
// and Dequantize ops with either per-axis or per-tensor scale.
class PrepareDynamicRangeQuantizableOp
    : public OpRewritePattern<arith::ConstantOp> {
 public:
  explicit PrepareDynamicRangeQuantizableOp(
tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc
// going forward.
// NOLINTNEXTLINE
llvm::cl::list<std::string> custom_opdefs(
    "tf-custom-opdefs",
    llvm::cl::desc("List of custom opdefs when importing graphdef"));

// A pair of Quantize and Dequantize ops can optionally be emitted before and
// after the quantized model, as adaptors that receive and produce
// floating-point data for the quantized model. Set this to `false` if the model input is