Results 1 - 10 of 77 for Bias (0.08 sec)
- tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

        auto non_bias_ele_type =
            mlir::cast<quant::QuantizedType>(non_bias_type.getElementType());
        non_bias_types.push_back(non_bias_ele_type);
      } else {
        // The non-bias hasn't been quantized, let's skip this bias.
        break;
      }
    }
    // The non-bias hasn't been quantized, let's skip this bias.
    if (non_bias_types.size() != non_biases.size()) return {};
    return func(/*op_types=*/non_bias_types, /*adjusted_quant_dim=*/-1,

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0)
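
  The guard in this snippet computes bias quantization parameters only when every
  non-bias operand already carries a quantized type; otherwise the bias is skipped.
  A minimal Python sketch of that gating logic (the scale-based representation and
  the name `bias_scale` are illustrative, not from the TensorFlow source):

    # A "quantized operand" is represented here by its scale; None means the
    # operand has not been quantized yet.
    def bias_scale(non_bias_scales):
        if any(s is None for s in non_bias_scales):
            return None  # mirrors `non_bias_types.size() != non_biases.size()`
        # Accumulator scale for the bias: the product of the operand scales
        # (e.g. input scale * filter scale for conv/fully-connected ops).
        scale = 1.0
        for s in non_bias_scales:
            scale *= s
        return scale

    print(bias_scale([0.02, 0.5]))   # 0.01
    print(bias_scale([0.02, None]))  # None: skip this bias
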
- tensorflow/compiler/mlir/lite/transforms/optimize.cc

        auto attr = rewriter.getZeroAttr(type);
        bias = rewriter.create<arith::ConstantOp>(add_op.getLoc(), type, attr);
        auto none_af = rewriter.getStringAttr("NONE");
        bias =
            rewriter.create<AddOp>(add_op.getLoc(), bias, constant_val, none_af)
                .getOutput();
      } else {
        // If there is no pre-existing bias and the `constant_val` is 1D, simply

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0)
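
  The branch shown materializes a zero bias and then folds `constant_val` into it
  with an add, so a trailing `add` can be absorbed into the preceding op's bias.
  A rough numpy sketch of why the fold preserves the result (shapes and names are
  illustrative only):

    import numpy as np

    x = np.random.rand(2, 3).astype(np.float32)
    w = np.random.rand(3, 4).astype(np.float32)
    constant_val = np.random.rand(4).astype(np.float32)

    # Original graph: add(fully_connected(x, w, bias=None), constant_val).
    y_unfused = x @ w + constant_val

    # Rewritten graph: fully_connected(x, w, bias = zeros + constant_val).
    bias = np.zeros(4, dtype=np.float32) + constant_val
    y_fused = x @ w + bias

    assert np.allclose(y_unfused, y_fused)
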
- tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    def UndoBroadcastFullyConnectedBiasAdd : Pat<
      (TFL_AddOp $lhs, (Arith_ConstantOp:$const_value $bias), TFL_AF_None),
      (TFL_AddOp $lhs, (Arith_ConstantOp (FlattenTo1D $bias)), TFL_AF_None),
      [(AnyStaticShapeTensor $lhs),
       (IsLastDimEqualToNumElements $bias, $bias),
       (HasRankAtMost<4> $bias),
       (HasRankAtLeast<2> $bias),
       (IsDefinedByFullyConnectedOp $lhs),

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0)
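
  The pattern's constraints (`IsLastDimEqualToNumElements`, rank between 2 and 4)
  guarantee the bias is effectively one-dimensional, so `FlattenTo1D` can only
  change its shape, not the broadcasted add. A small numpy illustration of that
  precondition (assumed semantics, not the TableGen implementation):

    import numpy as np

    bias = np.arange(4, dtype=np.float32).reshape(1, 1, 4)

    # Mirrors the guard: flattening is safe only when the last dimension
    # already holds every element of the bias.
    assert bias.shape[-1] == bias.size

    flat = bias.reshape(-1)  # what `FlattenTo1D` would produce: shape (4,)
    lhs = np.random.rand(2, 4).astype(np.float32)
    # Values agree either way; the flat bias additionally keeps the add's
    # result at lhs's own rank instead of broadcasting it up to rank 3.
    assert np.allclose(lhs + bias, lhs + flat)
    assert (lhs + flat).shape == lhs.shape
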
- tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

      }
    }

    // Creates a new `tfl.qconst` op for the bias. The bias values are 0s, because
    // this bias is a dummy bias (note that bias fusion is not considered for this
    // transformation). The quantization scale for the bias is input scale *
    // filter scale. `filter_const_op` is used to retrieve the filter scales and
    // the size of the bias constant.
    TFL::QConstOp CreateTflConstOpForDummyBias(

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0)
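
  The comment pins down the dummy bias's quantization: all-zero values whose scale
  is the input scale times the filter scale. A hedged numpy sketch of that
  convention (per-channel case; every name and value here is made up):

    import numpy as np

    input_scale = 0.05
    filter_scales = np.array([0.1, 0.2, 0.4])  # per-channel filter scales

    # Bias scale per channel: input scale * filter scale, as the comment states.
    bias_scales = input_scale * filter_scales

    # Dummy bias: zeros, so it dequantizes to exactly 0.0 in every channel.
    bias_q = np.zeros_like(filter_scales, dtype=np.int32)
    print(bias_scales)           # [0.005 0.01  0.02 ]
    print(bias_q * bias_scales)  # [0. 0. 0.]
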
- tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h

    }
    const int64_t cost_per_col = 2 * weight_type.getNumElements();
    *count = cost_per_col * cols;
    auto bias = op->getOperand(2);
    if (bias) {
      auto bias_type =
          mlir::dyn_cast_or_null<mlir::RankedTensorType>(bias.getType());
      if (bias_type && bias_type.hasStaticShape()) {
        *count += output_type.getNumElements();
      }
    }
    return true;
  }

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0)
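
  The counting rule is easy to check by hand: each output element of the matmul
  costs one multiply and one add per inner-dimension element, and a static-shape
  bias contributes one extra add per output element. A worked example with
  illustrative shapes:

    # Fully connected: weights of shape (rows, inner) applied to `cols` columns.
    rows, inner, cols = 8, 16, 4

    weight_elements = rows * inner      # weight_type.getNumElements() = 128
    cost_per_col = 2 * weight_elements  # one multiply + one add each = 256
    count = cost_per_col * cols         # 1024 ops for the matmul

    output_elements = rows * cols       # output_type.getNumElements() = 32
    count += output_elements            # +32 adds when a static bias is present
    print(count)                        # 1056
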
- tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // CHECK: return %[[TRANSPOSE_1]]

    // -----

    // Tests that a `add(convolution(%activation, %weight), %bias)` pattern with the
    // activation tensor of NCHW format and non-constant bias is converted to NHWC
    // convolution, but without the deferred transpose for `stablehlo.add`.
    // Transpose ops are inserted to the activation and output of

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes - Viewed (0)
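
  The test describes running an NCHW convolution as NHWC by inserting transposes
  around it. The layout change itself is just an axis permutation, e.g.:

    import numpy as np

    activation_nchw = np.random.rand(1, 3, 8, 8).astype(np.float32)

    # NCHW -> NHWC for the convolution input...
    nhwc = activation_nchw.transpose(0, 2, 3, 1)  # shape (1, 8, 8, 3)
    # ...and NHWC -> NCHW on its output, so surrounding ops (such as the
    # non-constant-bias `stablehlo.add` here) still see the original layout.
    back = nhwc.transpose(0, 3, 1, 2)

    assert np.array_equal(activation_nchw, back)
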
- tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    def FoldQuantWeightsIntoTposeConv : Pat<
      (TFL_TransposeConvOp $output_shape, (TFL_DequantizeOp $quant_weights),
         $quant_input, $bias, $padding, $stride_h, $stride_w, $faf),
      (TFL_TransposeConvOp $output_shape, $quant_weights,
         $quant_input, $bias, $padding, $stride_h, $stride_w, $faf),

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes - Viewed (0)
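
  The rewrite deletes the explicit `tfl.dequantize` and feeds the quantized
  weights directly to the transpose convolution. For a linear op this is sound
  because dequantization commutes with the linear map; the sketch below checks
  that with a matmul stand-in (uniform scale, zero point 0 assumed):

    import numpy as np

    scale = 0.1
    w_q = np.random.randint(-128, 128, size=(3, 4)).astype(np.float32)
    x = np.random.rand(2, 3).astype(np.float32)

    y_dequant_first = x @ (w_q * scale)  # dequantize, then apply the op
    y_fold = (x @ w_q) * scale           # apply the op, then rescale

    assert np.allclose(y_dequant_first, y_fold)
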
- tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

    %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
    %bias = arith.constant dense<[7.11401462, 7.05456924]> : tensor<2xf32>
    %q_input = "quantfork.qcast"(%input) : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58810077742034317:-128>>

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 11.4K bytes - Viewed (0)
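
  The `!quant.uniform<i8:f32, 0.588...:-128>` type in this test encodes affine
  quantization: q = round(x / scale) + zero_point, clamped to the int8 range. A
  quick numeric check with the test's own parameters (helper names are made up):

    import numpy as np

    scale, zero_point = 0.58810077742034317, -128

    def qcast(x):
        q = np.round(x / scale) + zero_point
        return np.clip(q, -128, 127).astype(np.int8)

    def dqcast(q):
        return (q.astype(np.float32) - zero_point) * scale

    x = np.array([0.0, 1.0, 75.0], dtype=np.float32)
    print(qcast(x))          # [-128 -126    0]
    print(dqcast(qcast(x)))  # each value within one scale step of x
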
- tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

        bias_fn: Optional[ops.Operation] = None,
        activation_fn: Optional[ops.Operation] = None,
    ) -> module.Module:

      class MatmulModel(module.Module):
        """A simple model with a single matmul.

        Bias and activation function are optional.
        """

        def __init__(
            self,
            weight_shape: Sequence[int],
        ) -> None:
          """Initializes a MatmulModel.

          Args:

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes - Viewed (0)
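
  The test base builds a single-matmul model whose bias and activation are
  optional hooks. A framework-free sketch of the same API shape (plain numpy;
  all names here are hypothetical, not the test's actual helpers):

    import numpy as np

    class MatmulModel:
        """Single matmul; bias and activation functions are optional."""

        def __init__(self, weight_shape, bias_fn=None, activation_fn=None):
            self.weight = np.random.rand(*weight_shape).astype(np.float32)
            self.bias_fn = bias_fn
            self.activation_fn = activation_fn

        def __call__(self, x):
            out = x @ self.weight
            if self.bias_fn is not None:
                out = self.bias_fn(out)
            if self.activation_fn is not None:
                out = self.activation_fn(out)
            return out

    model = MatmulModel((3, 2), bias_fn=lambda y: y + 0.5, activation_fn=np.tanh)
    print(model(np.ones((1, 3), dtype=np.float32)).shape)  # (1, 2)
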
- tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

        PatternRewriter& rewriter) {
      auto bias = op->getOperand(bias_idx);
      if (!mlir::isa<NoneType>(bias.getType())) return failure();

      // Proceed to create a zero bias.
      auto output = op->getResult(0);
      auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(output.getType());
      if (!output_type) return failure();

      // Bias should be a vector sized to the last output dim.

  Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes - Viewed (0)
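
  The pattern replaces a `None` bias with zeros shaped to the last dimension of
  the op's (statically shaped) output. A short numpy sketch of that shape rule
  (shapes illustrative):

    import numpy as np

    output_shape = (1, 8, 8, 16)                   # static output type, as required
    bias = np.zeros(output_shape[-1], np.float32)  # vector sized to the last dim

    out = np.random.rand(*output_shape).astype(np.float32)
    assert np.array_equal(out + bias, out)         # a zero bias changes nothing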