- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 130 for Bias (0.13 sec)
-
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
output = input / (bias + alpha * sqr_sum) ** beta For details, see [Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). }]; let arguments = (ins TFL_FpTensor:$input, I32Attr:$radius, F32Attr:$bias, F32Attr:$alpha,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity.pbtxt
# MLIR: %[[bias:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x!quant.uniform<i32:f32:0 # MLIR: %[[weight:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x1x1x256x!quant.uniform<i8<-127:127>:f32:0, {0.12581039038230116,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity_4bit.pbtxt
# MLIR: %[[bias:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x!quant.uniform<i32:f32:0 # MLIR: %[[weight:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x1x1x256x!quant.uniform<i4<-7:7>:f32:0, {2.2825599397931779,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
// proj_bias is F32 EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType()) .getElementType() .isF32()); // output gate bias is 0 since it is out of bounds of the bias tensor, so // we set its value as a const tensor of specified size and value 0. EXPECT_TRUE( mlir::cast<ElementsAttr>(mlir::cast<mlir::arith::ConstantOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir
} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32> return %0 : tensor<*xf32> } // CHECK-LABEL: func @depthwise_conv // CHECK-DAG: %[[bias:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<3xf32>}> : () -> tensor<3xf32> // CHECK-DAG: %[[q_w1:.*]] = "tf.Const"() <{value = #tf_type<tensor_proto : "0x746674{{.*}}-> tensor<2x3x1x3x!tf_type.qint8>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
struct OpQuantSpec { // Maps the operand index of a bias input to its quantization specifications, // including the non-bias operand indexes and the method retrieving // quantization parameters from list of parameters of the non-bias operands. // This map is empty if the op doesn't have a bias operand. BiasParamsMap biases_params;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
} // If all quantized or floating point then types are consistent. // Int is valid in combination with both quantized and floating point. // This occurs when doing qi16 convolution, as bias is passed as a // non-quantized int64 if (allTypesFp || allTypesQuantizedOrInt) return failure(); Location loc = op->getLoc(); SmallVector<Value> newOperands; newOperands.reserve(op->getNumOperands());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
ArrayRef<Value> results); // Add the second argument to the first argument, which is expected to be an // argument list. // Used to attach bias to einsum argument list. SmallVector<Value> AppendToVector(ArrayRef<Value> arguments, Value append); // Checks if the `Method` attached to the given `tf.XlaCallModule` op has // `WeightOnlyPtq`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tools/op_quant_spec_getters_gen.cc
<< "<" << op.getQualCppClassName() << ">::GetResultQuantizedType(i));\n"; matches.clear(); } // There is a "AccumulatorUniformScale" trait, set the type for bias. if (acc_uniform_trait_regex.match(trait_str, &matches)) { OUT(4) << "spec->biases_params.emplace(std::make_pair(" << matches[1] << ", std::make_pair(tfl.GetAllNonBiasOperands(),"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 11:18:44 UTC 2024 - 4.9K bytes - Viewed (0) -
src/strconv/ftoa.go
// 2) shift decimal by exponent // 3) read digits out & format package strconv import "math" // TODO: move elsewhere? type floatInfo struct { mantbits uint expbits uint bias int } var float32info = floatInfo{23, 8, -127} var float64info = floatInfo{52, 11, -1023} // FormatFloat converts the floating-point number f to a string,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 14:21:28 UTC 2024 - 13.9K bytes - Viewed (0)