- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for Bias (0.05 sec)
-
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
def UndoBroadcastFullyConnectedBiasAdd : Pat< (TFL_AddOp $lhs, (Arith_ConstantOp:$const_value $bias), TFL_AF_None), (TFL_AddOp $lhs, (Arith_ConstantOp (FlattenTo1D $bias)), TFL_AF_None), [(AnyStaticShapeTensor $lhs), (IsLastDimEqualToNumElements $bias, $bias), (HasRankAtMost<4> $bias), (HasRankAtLeast<2> $bias), (IsDefinedByFullyConnectedOp $lhs),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
Args: weight_shape: Shape of the weight tensor. bias_size: If None, do not use bias. Else, use given size as bias. activation_fn: The activation function to be used. No activation function if None. use_biasadd: If True, use BiasAdd for adding bias, else use AddV2. """ self.bias_size = bias_size self.activation_fn = activation_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
// PatternRewriter &rewriter, Location loc, // Type result_type, Value input, // Value filter, Value bias) const; // // And also the following method for getting the dimension for bias tensor: // // int64_t getBiasDim(ArrayRef<int64_t> filterShape) const; template <typename ConcreteType, typename TFConvOpType> class ConvertTFConvOp : public RewritePattern {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
%bias = arith.constant dense<1.0> : tensor<32xf32> %input = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 1.5>>) -> tensor<1x224x224x3xf32> %weight = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32:3, {1.0,2.0,3.0}>>) -> tensor<32x3x3x3xf32> %conv = "tfl.conv_2d"(%input, %weight, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK: return %[[UNIFORM_QUANTIZE_1]] : tensor<1x3x4x2x!quant.uniform<i8:f32, {{.*}}>> // ----- // Tests that fused pattern for convolution + bias is properly quantized. // Checks that fused functions with 1D bias is properly quantized. // The 1D bias should be broadcasted in dims [3], where it initially has // `quantizedDimension=0`, but has `quantizedDimension=3` after broadcasting.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
src/runtime/pprof/pprof_test.go
t.Fatal("block profile is missing expected functions") } // stddev of bias from 100 runs on local machine multiplied by 10x const threshold = 0.2 if bias := (il - fs) / il; math.Abs(bias) > threshold { t.Fatalf("bias: abs(%f) > %f", bias, threshold) } else { t.Logf("bias: abs(%f) < %f", bias, threshold) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size)); ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1)); ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1)); const float eps = 1e-7; // Bias scale should be input * per_channel_weight_scale. for (size_t i = 0; i < out_channel_size; i++) { EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/while_to_map_fn.mlir
// CHECK-NEXT: [[mpy:%.*]] = "tf.MatMul"(%arg6, [[weight]]) // CHECK-NEXT: [[element_index:%.*]] = "tf.AddV2"(%arg3, [[cst_1]]) // CHECK-NEXT: [[bias:%.*]] = "tf.GatherV2"(%arg7, %arg3, [[cst_0]]) // CHECK-NEXT: [[res:%.*]] = "tf.AddV2"([[mpy]], [[bias]]) // CHECK-NEXT: [[ta_0:%.*]] = "tf_mlrt.tf_await"(%arg0) // CHECK-NEXT: [[ta_1:%.*]] = "tf.TensorListSetItem"([[ta_0]], %arg3, [[res]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:40:22 UTC 2024 - 68.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewrite.go
func reciprocalExact64(c float64) bool { b := math.Float64bits(c) man := b & (1<<52 - 1) if man != 0 { return false // not a power of 2, denormal, or NaN } exp := b >> 52 & (1<<11 - 1) // exponent bias is 0x3ff. So taking the reciprocal of a number // changes the exponent to 0x7fe-exp. switch exp { case 0: return false // ±0 case 0x7ff: return false // ±inf case 0x7fe:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0)