- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 64 for Bias (0.04 sec)
-
src/math/floor.go
// return t + Copysign(1, x) // } // return t // } bits := Float64bits(x) e := uint(bits>>shift) & mask if e < bias { // Round abs(x) < 1 including denormals. bits &= signMask // +-0 if e == bias-1 { bits |= uvone // +-1 } } else if e < bias+shift { // Round any abs(x) >= 1 containing a fractional component [0,1). // // Numbers with larger exponents are returned unchanged since they
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
(TF_BiasAddOp:$bias_add $conv_out, (TF_ConstOp:$bias IsFloatElementsAttr:$bias_value), $data_format), (TF_ConstOp:$add_rhs IsFloatElementsAttr:$add_rhs_value)), (TF_BiasAddOp $conv_out, (TF_AddV2Op $bias, (ReshapeTo1DTensor $add_rhs)), $data_format), [(HasOneUse $bias_add), (ReshapableTo1DTensor $add_rhs), (HasEqualElementSize<[-1], [-1]> $bias, $add_rhs)]>; // Fuse AffineOp followed by an MulOp patterns.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h
: public QuantizationSpecTraitBase< ConcreteType, AccumulatorUniformScale<Bias, Operands...>::Impl> { public: // Whether the index-th operand is a bias. static bool IsBias(int index) { return index == Bias; } // Returns the indexes of all the non-bias operands. static std::vector<int> GetAllNonBiasOperands() { return std::vector<int>({Operands...}); } }; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
// conversion, then dequantized during inference. // Activation: f32, Weight: qi8, Bias: f32 WEIGHT_ONLY = 1; // Apply default dynamic range quantization. Quantized tensor value's // ranges are determined during graph runtime. // Activation: f32, Weight: qi8, Bias: f32 POST_TRAINING_QUANTIZATION_DYNAMIC_RANGE = 2;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/c/experimental/ops/nn_ops.h
// Adds `bias` to `value`. Status BiasAdd(AbstractContext* ctx, AbstractTensorHandle* const value, AbstractTensorHandle* const bias, AbstractTensorHandle** output, const char* data_format = "NHWC", const char* name = nullptr, const char* raw_device_name = nullptr); // The backward operation for "BiasAdd" on the "bias" tensor. Status BiasAddGrad(AbstractContext* ctx,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py
'NewFullyConnected', inputs=['input_: T', 'filter_: T', 'bias: T'], attrs=['act: {"", "RELU", "RELU6", "TANH"} = ""'], derived_attrs=['T: {float, int8}'], outputs=['o: T']) def _composite_fully_connected(input_, filter_, bias, act): res = tf.raw_ops.MatMul( a=input_, b=filter_, transpose_a=False, transpose_b=True) res = tf.raw_ops.Add(x=res, y=bias) if act == 'RELU': return tf.raw_ops.Relu(features=res)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 31 20:23:51 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir
func.return %1 : tensor<256x8x7x3xf32> // CHECK: %[[weight:.*]] = arith.constant dense<3.000000e+00> : tensor<3x3x3x3xf32> // CHECK: %[[bias:.*]] = arith.constant dense<[1.500000e+00, 3.000000e+00, 4.500000e+00]> // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[weight]], %[[bias]]) // CHECK: return %[[conv]] : tensor<256x8x7x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_mark_initialized_variables.mlir
func.func @serving_default(%arg0: tensor<!tf_type.resource<tensor<100x50xf32>>> {tf.resource_name = "dense/kernel"}, %arg1: tensor<!tf_type.resource<tensor<50xf32>>> {tf.resource_name = "dense/bias"}) -> (tensor<100x50xf32> {tf_saved_model.index_path = ["dense_2"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "", outputs = "dense_2/Add:0"}, tf_saved_model.exported_names = ["serving_default"]} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.1K bytes - Viewed (0) -
tensorflow/c/experimental/ops/nn_ops.cc
} // Op: BiasAdd() // Summary: Adds `bias` to `value`. // // Description: // This is a special case of `tf.add` where `bias` is restricted to be 1-D. // Broadcasting is supported, so `value` may have any number of dimensions. Status BiasAdd(AbstractContext* ctx, AbstractTensorHandle* const value, AbstractTensorHandle* const bias, AbstractTensorHandle** output,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.h
// that also contains other supporting ops needed to construct the operands for // the fused op. The caller provides the containing FuncOp as input with // arguments specifying the input, weight, projection and bias. // The weight, projection, bias and layer norm scale all need to be // RankedTensorType. // This class sets the layer norm coefficients to NoneType. class ConvertLSTMCellSimpleToFusedLSTM { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 03 00:14:05 UTC 2023 - 7.3K bytes - Viewed (0)