- Sort Score
- Result 10 results
- Languages All
Results 1 - 9 of 9 for Bias (0.09 sec)
-
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
mlir::cast<quant::QuantizedType>(non_bias_type.getElementType()); non_bias_types.push_back(non_bias_ele_type); } else { // The non-bias hasn't been quantized, let's skip this bias. break; } } // The non-bias hasn't been quantized, let's skip this bias. if (non_bias_types.size() != non_biases.size()) return {}; return func(/*op_types=*/non_bias_types, /*adjusted_quant_dim=*/-1,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h
} const int64_t cost_per_col = 2 * weight_type.getNumElements(); *count = cost_per_col * cols; auto bias = op->getOperand(2); if (bias) { auto bias_type = mlir::dyn_cast_or_null<mlir::RankedTensorType>(bias.getType()); if (bias_type && bias_type.hasStaticShape()) { *count += output_type.getNumElements(); } } return true; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
def FoldQuantWeightsIntoTposeConv : Pat< (TFL_TransposeConvOp $output_shape, (TFL_DequantizeOp $quant_weights), $quant_input, $bias, $padding, $stride_h, $stride_w, $faf), (TFL_TransposeConvOp $output_shape, $quant_weights, $quant_input, $bias, $padding, $stride_h, $stride_w, $faf),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
%weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32> %bias = arith.constant dense<[7.11401462, 7.05456924]> : tensor<2xf32> %q_input= "quantfork.qcast"(%input) : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58810077742034317:-128>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json
// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s // This test is to test that if the flatbuffer omits the last optional input `bias` of tfl.conv_2d op, the flatbuffer_importer will automatically add `none` value to tfl.conv_2d. // CHECK: %[[CST:.*]] = "tfl.no_value"() <{value}> : () -> none
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
} // If all quantized or floating point then types are consistent. // Int is valid in combination with both quantized and floating point. // This occurs when doing qi16 convolution, as bias is passed as a // non-quantized int64 if (allTypesFp || allTypesQuantizedOrInt) return failure(); Location loc = op->getLoc(); SmallVector<Value> newOperands; newOperands.reserve(op->getNumOperands());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
ArrayRef<Value> results); // Add the second argument to the first argument, which is expected to be an // argument list. // Used to attach bias to einsum argument list. SmallVector<Value> AppendToVector(ArrayRef<Value> arguments, Value append); // Checks if the `Method` attached to the given `tf.XlaCallModule` op has // `WeightOnlyPtq`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
} // Rewire the outputs. result.replaceAllUsesWith(new_result); } // Remove the old op. op->erase(); }); } // Fold quantized i32 (normally bias) into their float values. struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> { using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc
auto fc_op = rewriter.create<TFL::FullyConnectedOp>( bmm_op->getLoc(), ArrayRef<Type>{output_type}, /*input=*/output_lhs, /*filter=*/output_rhs, /*bias=*/no_input, /*fused_activation_function=*/rewriter.getStringAttr("NONE"), /*weights_format=*/rewriter.getStringAttr("DEFAULT"), /*keep_num_dims=*/rewriter.getBoolAttr(true),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0)