- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 74 for Bias (0.17 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
template <typename T> Operation* GetBroadcastedUserOp(Operation* op) { // Broadcast bias for known input shape. auto broadcast_in_dim_op = FindUserOfType<BroadcastInDimOp>(op); if (broadcast_in_dim_op != nullptr) { auto target_op = FindUserOfType<T>(broadcast_in_dim_op); if (target_op != nullptr) return target_op; } // Broadcast bias for unknown input shape. auto get_dimension_size_op = FindUserOfType<GetDimensionSizeOp>(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
func.return %fc : tensor<1x2xf32> // CHECK-DAG: %[[weight:.*]] = arith.constant dense<{{\[\[}}0.000000e+00, 1.000000e+00] // CHECK-DAG: %[[bias:.*]] = arith.constant dense<[0.000000e+00, 2147364.75]> // CHECK-DAG: %[[b_q:.*]] = "tfl.quantize"(%[[bias]]){{.*}}quant.uniform<i32:f32:0, {7.8740158861230386E-10,0.0019998892694710656}>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json
// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s // This test is to test that if the flatbuffer omits the last optional input `bias` of tfl.conv_2d op, the flatbuffer_importer will automatically add a `none` value to tfl.conv_2d. // CHECK: %[[CST:.*]] = "tfl.no_value"() <{value}> : () -> none
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
EXPECT_NE(float_tensor, nullptr); // If the tensor is a weight, it should have type INT8, otherwise it // should stay with type FLOAT32. // If the tensor is a bias, it should have type FLOAT32. // // Check with float_tensor name since quantized tensor // may be renamed. if (float_tensor->name()->str() == "conv_bias") {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
result_type = bias_add.getResult().getType(); } auto fused_loc = rewriter.getFusedLoc(locations); // The fused contraction has the same operands as the original contraction // with `bias` from the BiasAddOp appended. SmallVector<Value, 4> operands(contraction.operand_begin(), contraction.operand_end()); operands.push_back(bias_add.getBias());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// Restrict maximum absolute value of bias within INT_MAX / 2, to make some // room for accumulator. if (auto bias_quantized_type = mlir::dyn_cast<UniformQuantizedType>(params); bias_quantized_type != nullptr) { double bias_half_range = 0.0f; for (auto bias : bias_values.getValues<APFloat>()) { if (bias_half_range < std::abs(bias.convertToFloat())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
SetWeightForRecurrentToForgetGate(); SetWeightForRecurrentToOutputGate(); // Extract bias to cifg gates via slicing the bias tensor SetBiasToCellGate(); SetBiasToInputGate(); SetBiasToForgetGate(); SetBiasToOutputGate(); // Extract projection and set an empty projection bias SetProjection(); SetProjectionBias(); // Set the variable tensors SetInputActivationState();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// TensorFlow Conv3D has no bias; optimization patterns that fuse Conv3D // with other ops can fill the bias. Value none = rewriter.create<TFL::NoValueOp>( op->getLoc(), rewriter.getNoneType(), rewriter.getUnitAttr()); rewriter.replaceOpWithNewOp<TFL::Conv3DOp>( op, tf_op.getType(), tf_op.getInput(), tf_op.getFilter(), /*bias=*/none, dilation_depth_factor, dilation_height_factor,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
// PatternRewriter &rewriter, Location loc, // Type result_type, Value input, // Value filter, Value bias) const; // // And also the following method for getting the dimension for bias tensor: // // int64_t getBiasDim(ArrayRef<int64_t> filterShape) const; template <typename ConcreteType, typename TFConvOpType> class ConvertTFConvOp : public RewritePattern {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
%bias = arith.constant dense<1.0> : tensor<32xf32> %input = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 1.5>>) -> tensor<1x224x224x3xf32> %weight = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32:3, {1.0,2.0,3.0}>>) -> tensor<32x3x3x3xf32> %conv = "tfl.conv_2d"(%input, %weight, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0)