Results 91 - 100 of 104 for RELU (0.06 sec)
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
    llvm::StringRef str, flatbuffers::FlatBufferBuilder* builder) {
      return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
          .Case("NONE", tflite::ActivationFunctionType_NONE)
          .Case("RELU", tflite::ActivationFunctionType_RELU)
          .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
          .Case("RELU6", tflite::ActivationFunctionType_RELU6)
          .Case("TANH", tflite::ActivationFunctionType_TANH)
Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes
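The StringSwitch above translates a textual activation-function name from the MLIR attribute into the corresponding TFLite flatbuffer enum. A minimal Python sketch of that mapping, assuming the enum values from the TFLite schema (NONE = 0 through TANH = 4):

    # Illustrative mapping only; the real conversion lives in the C++
    # StringSwitch shown above and covers the full schema.
    ACTIVATION_BY_NAME = {
        "NONE": 0,
        "RELU": 1,
        "RELU_N1_TO_1": 2,
        "RELU6": 3,
        "TANH": 4,
    }

    assert ACTIVATION_BY_NAME["RELU6"] == 3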
tensorflow/compiler/mlir/tfr/python/tfr_gen_test.py
    y = _tfr_quant_raw_data(x)
    s, z = _tfr_quant_qparam(x)
    s = _tfr_quant_scale_factor(1.0, [s, s])
    s = _tfr_quant_scale_factor(1.0, [s])
    y = math_ops.Sub(y, z)
    qmin, qmax = _tfr_quant_act_range('RELU', 1.0, 0)
    (qmin, qmax)  # pylint: disable=pointless-statement
    d = _tfr_quant_rescale(y, s, 0)
    e = math_ops.Cast(x=d, DstT=dtypes.int16)
    f = math_ops.Cast(x=e, DstT=dtypes.int8)
    return f
Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 28.8K bytes
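The test body above walks a raw quantized tensor through zero-point subtraction, rescaling, and narrowing casts. A minimal NumPy sketch of that arithmetic, assuming per-tensor parameters (the helper below is illustrative, not a TFR builtin):

    import numpy as np

    def rescale_to_int8(x_q, scale_factor, zero_point):
        y = x_q.astype(np.int32) - zero_point   # math_ops.Sub(y, z)
        d = np.round(y * scale_factor)          # _tfr_quant_rescale(y, s, 0)
        e = d.astype(np.int16)                  # Cast to int16
        return e.astype(np.int8)                # Cast to int8 (may wrap, as in the test)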
tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir
%5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32> %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 52.6K bytes
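The layerStats range feeds the post-training quantizer, which derives a scale and zero point for the int8 output. A sketch of the standard asymmetric derivation applied to the stats above (illustrative; the actual pass may adjust the range, e.g. to keep zero exactly representable):

    rmin, rmax = -56.2916565, 122.922478
    qmin, qmax = -128, 127
    scale = (rmax - rmin) / (qmax - qmin)    # ~0.7028
    zero_point = round(qmin - rmin / scale)  # ~-48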
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir
    func.return %2 : tensor<4x8xf32>
    }

    //===----------------------------------------------------------------------===//
    // Relu op legalizations.
    //===----------------------------------------------------------------------===//

    // -----

    // CHECK-LABEL: func @relu
    func.func @relu(%arg0: tensor<1xi32>) -> tensor<1xi32> {
      // CHECK: %[[ZERO:.*]] = mhlo.constant dense<0> : tensor<i32>
Last Modified: Mon May 06 18:46:23 UTC 2024 - 335.5K bytes
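The CHECK lines above pin down the lowering: tf.Relu becomes an elementwise maximum against a zero constant of the same element type. A minimal NumPy equivalent of that semantics:

    import numpy as np

    def relu(x):
        zero = np.zeros_like(x)     # mhlo.constant dense<0>
        return np.maximum(x, zero)  # mhlo.maximum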
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
    // CHECK: "tfl.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<4x5xi32>, tensor<1x5xi32>, tensor<2xi64>) -> tensor<4x5xi32>
    }

    func.func @testReluI32(%arg0: tensor<1xi32>) -> tensor<1xi32> {
      %0 = "tf.Relu"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
      func.return %0 : tensor<1xi32>
      // CHECK-LABEL: testReluI32
      // CHECK: %[[CONST_0:.*]] = arith.constant dense<0> : tensor<i32>
Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual", // Others "AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty", "Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad", "LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select", "SelectV2", "Transpose", "ConjugateTranspose",
Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes
tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir
"tf.Yield"(%t0, %t1, %t2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> () }, { %e0 = "tf.Neg"(%arg1) : (tensor<2xf32>) -> tensor<2xf32> %e1 = "tf.Relu"(%arg1) : (tensor<2xf32>) -> tensor<2xf32> %e2 = "tf.Sin"(%arg1) : (tensor<2xf32>) -> tensor<2xf32> "tf.Yield"(%e0, %e1, %e2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> ()
Last Modified: Mon Oct 23 14:40:35 UTC 2023 - 236.4K bytes
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
    let summary = "Computes rectified linear gradients for a Relu operation.";

    let arguments = (ins
      Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
      Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
    the outputs of that operation (both work equivalently).}]>:$features
    );
Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes
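The op description above fixes the ReluGrad semantics: gradients pass through wherever the Relu input (or, equivalently, its output) was positive, and are zeroed elsewhere. A minimal NumPy sketch:

    import numpy as np

    def relu_grad(gradients, features):
        # features may be either the Relu inputs or its outputs;
        # both give the same mask, since relu(x) > 0 iff x > 0.
        return np.where(features > 0, gradients, 0)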
tensorflow/compiler/mlir/lite/transforms/optimize.cc
    // The actual Optimize Pass.
    namespace {
    #define GEN_PASS_DEF_OPTIMIZEPASS
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"

    constexpr char kRelu[] = "RELU";
    constexpr char kRelu6[] = "RELU6";
    constexpr char kRelu1[] = "RELU_N1_TO_1";

    ElementsAttr FlattenTo1D(Attribute a) {
      auto elements = mlir::cast<DenseElementsAttr>(a);
Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes
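The three constants name the fused activation functions the optimize pass matches on. A sketch of the clamp each name conventionally denotes in TFLite (illustrative; the pass rewrites IR rather than evaluating tensors):

    import numpy as np

    def apply_fused_activation(x, act):
        if act == "RELU":          # kRelu
            return np.maximum(x, 0.0)
        if act == "RELU6":         # kRelu6
            return np.clip(x, 0.0, 6.0)
        if act == "RELU_N1_TO_1":  # kRelu1
            return np.clip(x, -1.0, 1.0)
        return x                   # "NONE"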
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
    // CHECK-PER-TENSOR: return %[[UNIFORM_QUANTIZE_0]] : tensor<?x3x4x2x!quant.uniform<i8:f32, {{.*}}>>

    // -----

    // Tests that fused pattern for convolution + bias + relu with
    // dynamic batch dimension is properly quantized.
    // Note that this checks for identical condition as
    // quantize_conv_with_bias_dynamic_fn, omitting stablehlo.maximum.
Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes
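The omitted stablehlo.maximum reflects a general point about quantized fusion: an explicit relu becomes unnecessary once its effect is folded into the output clamp. A sketch of that folding, assuming standard int8 ranges:

    def fused_relu_clamp(qmin, qmax, zero_point):
        # relu(x) >= 0 in real terms means q >= zero_point in the quantized
        # domain, so the clamp floor simply rises to the zero point.
        return max(qmin, zero_point), qmax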