Results 41 - 50 of 171 for Bias (0.07 sec)
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
"\""# func_name #"\", $0...)", returns>; // Add the second argument to the first argument, which is expected to be an // argument list. // bias(einsum(inputs), bias) --> einsum_with_bias(AppendToVector(inputs, bias)) // Since inputs is a vector in case of einsum, we cannot use ArgumentList here. def AppendToVector : NativeCodeCall<"AppendToVector($0, $1)">;
Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes
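The pattern above fuses a trailing bias add into the einsum call by appending the bias operand to the einsum's operand vector. A minimal Python sketch of that operand-list rewrite, with hypothetical names standing in for the actual MLIR rewriter:

# Illustrative sketch of the rewrite the TableGen pattern describes:
# bias(einsum(inputs), bias) --> einsum_with_bias(inputs + [bias]).
def append_to_vector(operands, bias):
    """Return a new operand list with the bias appended at the end."""
    return list(operands) + [bias]

einsum_operands = ["lhs", "rhs"]   # placeholder operand names
fused_operands = append_to_vector(einsum_operands, "bias")
print(fused_operands)              # ['lhs', 'rhs', 'bias']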
tensorflow/c/experimental/gradients/nn_grad_test.cc
  A.reset(A_raw);
}

// Bias
float Bias_vals[] = {2.0f, 3.0f};
int64_t Bias_dims[] = {2};
AbstractTensorHandlePtr Bias;
{
  AbstractTensorHandle* Bias_raw;
  status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
      immediate_execution_ctx_.get(), Bias_vals, Bias_dims, 1, &Bias_raw);
  ASSERT_EQ(errors::OK, status_.code()) << status_.message();
  Bias.reset(Bias_raw);
}
Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 8.3K bytes
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc
PatternRewriter& rewriter) { auto bias = op->getOperand(bias_idx); if (!mlir::isa<NoneType>(bias.getType())) return failure(); // Proceed to create a zero bias. auto output = op->getResult(0); auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(output.getType()); if (!output_type) return failure(); // bias should be a vector sized of the last output dim.
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes
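The pass above materializes a zero bias whose length equals the last dimension of the op's output. A numpy sketch of that shape rule, illustrative only and not the MLIR implementation:

import numpy as np

# Illustrative only: the created bias is a 1-D zero vector sized to the last
# dimension of the (ranked) output type.
output_shape = (1, 8, 8, 16)   # hypothetical ranked output shape
zero_bias = np.zeros(output_shape[-1], dtype=np.float32)
assert zero_bias.shape == (16,)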
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
    Args:
      weight_shape: Shape of the weight tensor.
      bias_size: If None, do not use bias. Else, use given size as bias.
      activation_fn: The activation function to be used. No activation
        function if None.
      use_biasadd: If True, use BiasAdd for adding bias, else use AddV2.
    """
    self.bias_size = bias_size
    self.activation_fn = activation_fn
Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes
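In the test base above, `use_biasadd` only switches which TF op performs the addition; numerically, both BiasAdd and AddV2 add a 1-D bias along the last dimension. A numpy sketch of the equivalent arithmetic, illustrative rather than the test harness itself:

import numpy as np

x = np.random.rand(2, 3, 4).astype(np.float32)   # hypothetical activations
bias = np.random.rand(4).astype(np.float32)      # bias_size matches the last dim

# BiasAdd and AddV2 both reduce to broadcasting the bias over the last axis.
y = x + bias
assert y.shape == x.shape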
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
%weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
%bias = arith.constant dense<[7.11401462, 7.05456924]> : tensor<2xf32>
%q_input = "quantfork.qcast"(%input) : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58810077742034317:-128>>
Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes
src/math/sqrt.go
//   huge + tiny is equal to huge, and whether huge - tiny is
//   equal to huge for some floating point number "huge" and "tiny".
//
// Notes: Rounding mode detection omitted. The constants "mask", "shift",
// and "bias" are found in src/math/bits.go

// Sqrt returns the square root of x.
//
// Special cases are:
//
//   Sqrt(+Inf) = +Inf
//   Sqrt(±0) = ±0
//   Sqrt(x < 0) = NaN
//   Sqrt(NaN) = NaN
func Sqrt(x float64) float64 {
Last Modified: Mon Aug 15 17:07:57 UTC 2022 - 4.8K bytes
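The "bias" this file mentions is the IEEE-754 exponent bias (1023 for float64), defined alongside "mask" and "shift" in src/math/bits.go. A short Python sketch of how the biased exponent is unpacked from a float64 bit pattern, using the standard IEEE-754 layout rather than Go's implementation:

import struct

def float64_exponent(x: float) -> int:
    """Return the unbiased exponent of a float64 via the IEEE-754 layout."""
    bits = struct.unpack(">Q", struct.pack(">d", x))[0]
    shift, mask, bias = 52, 0x7FF, 1023   # same roles as in src/math/bits.go
    return ((bits >> shift) & mask) - bias

print(float64_exponent(8.0))   # 3, since 8.0 = 1.0 * 2**3
print(float64_exponent(0.5))   # -1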
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
template <typename T> Operation* GetBroadcastedUserOp(Operation* op) { // Broadcast bias for known input shape. auto broadcast_in_dim_op = FindUserOfType<BroadcastInDimOp>(op); if (broadcast_in_dim_op != nullptr) { auto target_op = FindUserOfType<T>(broadcast_in_dim_op); if (target_op != nullptr) return target_op; } // Broadcast bias for unknown input shape. auto get_dimension_size_op = FindUserOfType<GetDimensionSizeOp>(op);
Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes
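The helper above walks the users of the bias constant to find the op that consumes the broadcasted bias. For a statically known input shape, broadcasting a 1-D bias to the output shape amounts to ordinary numpy-style broadcasting, sketched below as an illustration rather than the StableHLO lowering:

import numpy as np

bias = np.array([0.1, 0.2, 0.3], dtype=np.float32)   # hypothetical 1-D bias
output_shape = (2, 4, 3)                              # known output shape

# With a known shape, broadcast_in_dim behaves like broadcasting the bias
# along the last dimension of the output.
broadcast_bias = np.broadcast_to(bias, output_shape)
assert broadcast_bias.shape == output_shape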
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
  func.return %fc : tensor<1x2xf32>

// CHECK-DAG: %[[weight:.*]] = arith.constant dense<{{\[\[}}0.000000e+00, 1.000000e+00]
// CHECK-DAG: %[[bias:.*]] = arith.constant dense<[0.000000e+00, 2147364.75]>
// CHECK-DAG: %[[b_q:.*]] = "tfl.quantize"(%[[bias]]){{.*}}quant.uniform<i32:f32:0, {7.8740158861230386E-10,0.0019998892694710656}>>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
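The CHECK lines above expect the bias quantized to i32 with per-channel scales. Assuming the common TFLite-style convention that the bias scale is the product of the input scale and each per-channel weight scale, a numpy sketch with hypothetical scale values (not the ones in the test):

import numpy as np

input_scale = 0.5881                          # hypothetical input scale
weight_scales = np.array([1.34e-9, 3.4e-3])   # hypothetical per-channel weight scales

bias = np.array([0.0, 2147364.75], dtype=np.float32)
bias_scales = input_scale * weight_scales     # assumed convention: s_bias = s_in * s_w
q_bias = np.round(bias / bias_scales).astype(np.int32)
print(bias_scales, q_bias)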
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json
// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s

// This test checks that if the flatbuffer omits the last optional input `bias` of the tfl.conv_2d op, the flatbuffer importer automatically adds a `none` value to tfl.conv_2d.

// CHECK: %[[CST:.*]] = "tfl.no_value"() <{value}> : () -> none
Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.8K bytes
tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir
tfr.func @tf__my_biased_dense(%input: !tfr.tensor, %weight: !tfr.tensor, %bias: !tfr.tensor,
                              %act: !tfr.attr{tfr.name="act", tfr.default=""}) -> !tfr.tensor {
  %dot = tfr.call @tf__mat_mul(%input, %weight) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
  %add = tfr.call @tf__add(%dot, %bias) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
  %relu = tfr.constant "relu" -> !tfr.attr
Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 4.2K bytes
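The decomposition above expresses a biased dense layer as a matmul, a bias add, and the activation named by the `act` attribute. A numpy sketch of the same computation, illustrative only and not the TFR runtime:

import numpy as np

def my_biased_dense(x, w, b, act="relu"):
    """Mirror the tf__my_biased_dense decomposition: matmul, add bias, activation."""
    out = x @ w + b
    if act == "relu":
        out = np.maximum(out, 0.0)
    return out

x = np.random.randn(2, 3).astype(np.float32)
w = np.random.randn(3, 4).astype(np.float32)
b = np.zeros(4, dtype=np.float32)
print(my_biased_dense(x, w, b).shape)   # (2, 4)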