- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 171 for Bias (0.06 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir
func.func @convolution_add_add( %lhs: tensor<?x3x2x1xi8>, %rhs: tensor<2x1x1x1xi8>, %zp_offset: tensor<?x2x2x1xi32>, %bias: tensor<1xi32> ) -> tensor<?x2x2x1xi32> { // CHECK-DAG: %[[conv:.*]] = mhlo.convolution // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[zp_offset:.*]], %[[bias:.*]] // CHECK-DAG: %[[result:.*]] = chlo.broadcast_add %[[conv]], %[[combined]] // CHECK: return %[[result]]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 24 02:26:47 UTC 2024 - 10.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
mlir::cast<quant::QuantizedType>(non_bias_type.getElementType()); non_bias_types.push_back(non_bias_ele_type); } else { // The non-bias hasn't been quantized, let's skip this bias. break; } } // The non-bias hasn't been quantized, let's skip this bias. if (non_bias_types.size() != non_biases.size()) return {}; return func(/*op_types=*/non_bias_types, /*adjusted_quant_dim=*/-1,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions_fusion.td
[(IsNotInLiftedFunc $res), (IsStableHLOConstantOp $bias)], [], (addBenefit 5)>; def LiftDotGeneralWithBias : Pat< (StableHLO_AddOp:$res (StableHLO_DotGeneralOp $lhs, $rhs, $dot_dimension_numbers, $precision_config), (StableHLO_BroadcastInDimOp $bias, $dims)), (LiftAsTFXlaCallModule<"composite_dot_general_with_bias_fn"> (ArgumentList $lhs, $rhs, $bias), (ResultList $res), (NamedAttributeList
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir
// CHECK-NEXT: %[[conv:.*]] = "tf.Conv2D"(%arg0, %[[cst]]) // CHECK-NEXT: %[[bias:.*]] = "tf.AddV2"(%[[conv]], %[[cst_0]]) // CHECK-NEXT: return %[[bias]] : tensor<256x8x7x16xf32> } // CHECK-LABEL: convaddv2mul func.func @convaddv2mul(%arg: tensor<256x32x32x3xf32>) -> tensor<256x8x7x16xf32> { %filter = arith.constant dense<2.0> : tensor<3x3x3x16xf32> %bias = arith.constant dense<3.0> : tensor<16xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_ops_test.py
filter_ = tf.random.uniform([2, 2, 1, 8]) bias = tf.zeros([8]) kwargs = { 'input_': input_, 'filter_': filter_, 'bias': bias, 'stride_w': 2, 'stride_h': 2, 'dilation_w': 1, 'dilation_h': 1, 'padding': 'SAME', 'act': 'RELU' } self._assertOpAndComposite([input_, filter_, bias],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td
// Pattern rules for lifting ops with bias as functions //===----------------------------------------------------------------------===// def LiftDepthwiseConv2dNativeWithBias : Pat< (TF_BiasAddOp:$res (TF_DepthwiseConv2dNativeOp $input, $filter, $strides, $padding, $explicit_paddings, IsDataFormatNHWC:$data_format, $dilations), $bias, IsDataFormatNHWC:$bias_data_format),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 15.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_lift_variables.mlir
%1 = "tf.ReadVariableOp"(%0) {device = ""} : (tensor<!tf_type.resource<tensor<100x50xf32>>>) -> tensor<100x50xf32> %2 = "tf.VarHandleOp"() {_class = ["loc:@dense/bias"], allowed_devices = [], container = "", device = "", shared_name = "dense/bias"} : () -> tensor<!tf_type.resource<tensor<50xf32>>> %3 = "tf.ReadVariableOp"(%2) {device = ""} : (tensor<!tf_type.resource<tensor<50xf32>>>) -> tensor<50xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 24 05:47:26 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.td
), [(AreLastTwoDimsTransposed $perm_value), (IsNoneType $bias)]>; // Fuses TFL_FullyConnectedOp and TFL_TransposeOp Rhs to TFL_BatchMatMulOp def FuseTransposeFCRhsToBatchMatmul : Pat< (TFL_FullyConnectedOp 2DTensorOf<[F32]>:$lhs, (TFL_TransposeOp TensorOf<[F32]>:$rhs, (Arith_ConstantOp:$perm_value $p0)), $bias, $TFL_AF_None, $TFL_FCWO_Default, $keep_num_dims, $asymmetric_quantize_inputs ),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 09 23:44:09 UTC 2023 - 2.6K bytes - Viewed (0) -
src/vendor/golang.org/x/net/idna/punycode.go
} t := k - bias if k <= bias { t = tmin } else if k >= bias+tmax { t = tmax } if digit < t { break } w, overflow = madd(0, w, base-t) if overflow { return "", punyError(encoded) } } if len(output) >= 1024 { return "", punyError(encoded) } x := int32(len(output) + 1) bias = adapt(i-oldI, x, oldI == 0) n += i / x
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 09 20:10:36 UTC 2021 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/optimize.td
$dilations, (location $conv)), [(CanFuseMulAndConv2D $filter_value, $mul_value), (HasOneUse $conv)]>; // This rule does the following pattern match and rewrite: // // input bias input value bias value // \ / => \ / \ / // BiasAdd value Mul Mul // \ / \ /
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 22 07:31:23 UTC 2023 - 5.4K bytes - Viewed (0)