- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for PerAxis (0.11 sec)
-
tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
// tf.dequantize // | template <typename TFFakeQuantOp, bool PerAxis> struct InsertQuantOpsAfterTFFakeQuantOp : public OpRewritePattern<TFFakeQuantOp> { using BaseType = InsertQuantOpsAfterTFFakeQuantOp<TFFakeQuantOp, PerAxis>; explicit InsertQuantOpsAfterTFFakeQuantOp<TFFakeQuantOp, PerAxis>( MLIRContext *ctx) : OpRewritePattern<TFFakeQuantOp>(ctx) {}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc
// TF::FakeQuant operators using PreparePerTensorFakeQuant = ConvertFakeQuantOpToQuantOps< TF::FakeQuantWithMinMaxVarsOp, /*PerAxis=*/false, FetchConstantMinMaxInputs<TF::FakeQuantWithMinMaxVarsOp>>; using PreparePerChannelFakeQuant = ConvertFakeQuantOpToQuantOps< TF::FakeQuantWithMinMaxVarsPerChannelOp, /*PerAxis=*/true, FetchConstantMinMaxInputs<TF::FakeQuantWithMinMaxVarsPerChannelOp>>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 19 00:13:50 UTC 2022 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
using PreparePerTensorFakeQuant = InsertTFLQuantOpsAfterTFFakeQuantOp< TF::FakeQuantWithMinMaxVarsOp, /*PerAxis=*/false, FetchConstantMinMaxInputs<TF::FakeQuantWithMinMaxVarsOp>>; using PreparePerChannelFakeQuant = InsertTFLQuantOpsAfterTFFakeQuantOp< TF::FakeQuantWithMinMaxVarsPerChannelOp, /*PerAxis=*/true, FetchConstantMinMaxInputs<TF::FakeQuantWithMinMaxVarsPerChannelOp>>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 03 00:14:05 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
// case of back-to-back tf.FakeQuant occurs // // tf.FakeQuant* // | // tf.FakeQuant* // template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax> class InsertTFLQuantOpsAfterTFFakeQuantOp { public: explicit InsertTFLQuantOpsAfterTFFakeQuantOp(bool use_fake_quant_num_bits) : use_fake_quant_num_bits_(use_fake_quant_num_bits) {}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// case of back-to-back tf.FakeQuant occurs // // tf.FakeQuant* // | // tf.FakeQuant* // template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax> class ConvertFakeQuantOpToQuantOps { public: explicit ConvertFakeQuantOpToQuantOps(bool use_fake_quant_num_bits) : use_fake_quant_num_bits_(use_fake_quant_num_bits) {}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.cc
} for (auto spec : rop.getOutputSpecs()) { out_specs.push_back(spec.cast<TypeAttr>().getValue()); } auto in_spec = input_specs[0].dyn_cast<UniformQuantizedType>(); // TODO(fengliuai): handles the PerAxis QuantizedType. auto w_spec = input_specs[1].dyn_cast<UniformQuantizedType>(); auto b_spec = input_specs[2].dyn_cast<UniformQuantizedType>(); auto o_spec = out_specs[0].dyn_cast<UniformQuantizedType>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// The following functions tests example quantization patterns outputted from // JAX Quantizer. JAX Quantizer should output integer types, which are // composed into `UniformQuantized{|PerAxis}Type` via // `compose_uniform_quantized_type_pass.cc`. // ============================================================================
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0)