- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 32 for SYMMETRIC (0.11 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir
// HISTOGRAM-MSE-SYMMETRIC-CHECK-NEXT: "tf.AddV2" // HISTOGRAM-MSE-SYMMETRIC-CHECK-NEXT: return // HISTOGRAM-MSE-SYMMETRIC-CHECK: func @composite_conv2d_with_relu6_fn // HISTOGRAM-MSE-SYMMETRIC-CHECK-NEXT: "tf.Conv2D" // HISTOGRAM-MSE-SYMMETRIC-CHECK-NEXT: "tf.Relu6" // HISTOGRAM-MSE-SYMMETRIC-CHECK-NEXT: return // ----- module { // CHECK-LABEL: func.func @main
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 32.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// the value range isn't straddling zero, an empty type is returned. The min/max // are adjusted to be symmetric if `symmetric` flag is set to True. And // `symmetric` can only be set to true when it is signed and narrow_range. Type GetUniformQuantizedTypeForWeight(ElementsAttr attr, bool symmetric, unsigned num_bits, bool is_signed, bool narrow_range,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// works for both this value and 0.0. if (single_value < 0.0) { mins[0] = single_value; maxs[0] = symmetric ? -single_value : 0.0; } else if (single_value > 0.0) { mins[0] = symmetric ? -single_value : 0.0; maxs[0] = single_value; } else { mins[0] = maxs[0] = single_value; } for (int i = 1; i < dim_size; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
src/math/big/arith_test.go
arg = argVV{a.z, a.y, a.x, a.c} testFunVV(t, "addVV_g symmetric", addVV_g, arg) testFunVV(t, "addVV symmetric", addVV, arg) arg = argVV{a.x, a.z, a.y, a.c} testFunVV(t, "subVV_g", subVV_g, arg) testFunVV(t, "subVV", subVV, arg) arg = argVV{a.y, a.z, a.x, a.c} testFunVV(t, "subVV_g symmetric", subVV_g, arg) testFunVV(t, "subVV symmetric", subVV, arg) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 02 14:43:52 UTC 2022 - 19.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
Type weight_type; if (IsPerTensor(weight_only_ptq)) { weight_type = dyn_cast<quant::QuantizedType>( quant::GetUniformQuantizedTypeForWeight( attr, /*symmetric=*/true, /*num_bits=*/8, /*is_signed=*/true, /*narrow_range=*/true, /*legacy_float_scale=*/false)); } else { int quantization_dimension = GetQuantizationDimension(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec) { // TODO - b/278949920: Enable Per-Channel Quantization for XLA Opset // Currently, support symmetric, per-tensor, signed int8 const bool kIsNarrowRange = true; const bool kIsSigned = true; const int kBitWidth = 8; DenseFPElementsAttr attr;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
src/math/big/nat_test.go
arg = argNN{a.z, a.y, a.x} testFunNN(t, "add symmetric", nat.add, arg) arg = argNN{a.x, a.z, a.y} testFunNN(t, "sub", nat.sub, arg) arg = argNN{a.y, a.z, a.x} testFunNN(t, "sub symmetric", nat.sub, arg) } for _, a := range prodNN { arg := a testFunNN(t, "mul", nat.mul, arg) arg = argNN{a.z, a.y, a.x} testFunNN(t, "mul symmetric", nat.mul, arg) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jan 09 15:29:36 UTC 2024 - 26.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// When `disable_per_channel_` is false, per-channel symmetric quantization // parameters are created from the weights when the ops support per-channel // quantization. Otherwise, uses per-tensor asymmetric quantization with // narrow range. // per-axis quantization weight, with symmetric min/max enforced. final_type = GetUniformQuantizedPerAxisTypeForWeight(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad_test.cc
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)); auto paddings = Const(scope_, {{1, 1}, {2, 2}}); TensorShape y_shape({4, 7}); auto y = MirrorPad(scope_, x, paddings, "SYMMETRIC"); RunTest(x, x_shape, y, y_shape); } TEST_F(ArrayGradTest, MirrorPadGradGrad_Reflect) { TensorShape x_shape({4, 7}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
// Computes the effective min/max values of the attribute values. quant::ExtractMinMaxFromAttr(attr, /*dim_size=*/1, /*slice_size=*/1, /*symmetric=*/true, mins, maxs); double scale = maxs[0] / -llvm::minIntN(tensor_property.number_of_bits); quant_type = UniformQuantizedType::getChecked( const_op->getLoc(), quant::QuantizationFlags::Signed,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0)