Results 91 - 100 of 108 for Selu (0.05 sec)
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
    // RUN: stablehlo-quant-opt %s -stablehlo-merge-fusion-with-dequantize -split-input-file -verify-diagnostics | FileCheck %s

    // Merge fusion with dequantize for relu case.
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_relu_fusion
      func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 23:45:53 UTC 2024 - 14K bytes - Viewed (0)
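The pass in this result merges a quantized fusion with the dequantize op that follows it. For relu this is sound because, with a positive scale, relu commutes with affine dequantization: clamping at the zero point in the quantized domain equals applying relu after dequantizing. A minimal numpy sketch of that identity (function names here are illustrative, not from the pass):

```python
import numpy as np

def dequantize(q, scale, zero_point):
    """Affine dequantization: real = scale * (q - zero_point)."""
    return scale * (q.astype(np.int32) - zero_point)

# With scale > 0, relu(dequantize(q)) == dequantize(max(q, zero_point)),
# which is why the relu can stay fused on the quantized side.
scale, zero_point = 0.05, 3
q = np.array([-5, 0, 3, 7, 120], dtype=np.int8)

after_dequant = np.maximum(dequantize(q, scale, zero_point), 0.0)
before_dequant = dequantize(np.maximum(q, zero_point), scale, zero_point)
assert np.allclose(after_dequant, before_dequant)
```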
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
        padding: str = 'SAME',
        has_func_alias: bool = False,
    ) -> module.Module:
      class ConvModel(module.Module):
        """A simple model with a single conv2d, bias and relu."""

        def __init__(self):
          self.out_channel_size = filter_shape[-1]
          # This ensures filters will have different value range per out channel
          self.filters = np.stack(
              [

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes - Viewed (0)
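This test base builds a tiny conv2d + bias + relu module as the quantization target. A self-contained sketch of such a model (shapes, names, and the random filter init are illustrative, not the test's exact code):

```python
import numpy as np
import tensorflow as tf

class ConvModel(tf.Module):
  """A single conv2d followed by bias add and relu, for quantization tests."""

  def __init__(self, filter_shape=(2, 3, 3, 2)):
    # Random filters here; the real test stacks hand-picked values so each
    # output channel gets a distinct value range.
    self.filters = tf.constant(
        np.random.uniform(-1, 1, size=filter_shape).astype(np.float32))
    self.bias = tf.constant(np.zeros(filter_shape[-1], dtype=np.float32))

  @tf.function(input_signature=[tf.TensorSpec([1, 8, 8, 3], tf.float32)])
  def __call__(self, x):
    out = tf.nn.conv2d(x, self.filters, strides=[1, 1, 1, 1], padding='SAME')
    out = tf.nn.bias_add(out, self.bias)
    return tf.nn.relu(out)
```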
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
    range for the fused activation `act` with the quantization defined by the
    `scale` and `zero point`. Currently, the allowed activations are `NONE`,
    `RELU`, `RELU6` and `RELU_N1_TO_1`.

    Example:

    ```mlir
    %3, %4 = tfr.quant_act_range(%2, %1, %0) : (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
    ```
    }];

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 10:54:29 UTC 2024 - 17.4K bytes - Viewed (0)
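`tfr.quant_act_range` returns the clamp bounds that a fused activation implies under a given quantization. A hedged Python sketch of the arithmetic presumably behind it (the helper is hypothetical; the real op consumes and produces tfr values):

```python
def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
    """Clamp range in the quantized domain for a fused activation.

    A quantized value q represents scale * (q - zero_point), so a real-valued
    bound r maps to q = round(r / scale) + zero_point.
    """
    def quantize(r):
        return int(round(r / scale)) + zero_point

    if act == 'NONE':
        return qmin, qmax
    if act == 'RELU':           # clamp reals to [0, inf)
        return max(qmin, quantize(0.0)), qmax
    if act == 'RELU6':          # clamp reals to [0, 6]
        return max(qmin, quantize(0.0)), min(qmax, quantize(6.0))
    if act == 'RELU_N1_TO_1':   # clamp reals to [-1, 1]
        return max(qmin, quantize(-1.0)), min(qmax, quantize(1.0))
    raise ValueError(f'unsupported activation: {act}')
```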
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
      func.return %0 : tensor<*xf32>
    }

    // CHECK-LABEL: testMaximumOfZeroToReluFloat
    func.func @testMaximumOfZeroToReluFloat(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      // CHECK: %0 = "tf.Relu"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32>
      // CHECK: return %0
      %cst_0 = arith.constant dense<0.000000e+00> : tensor<f32>

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0)
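The canonicalization this test checks rests on the elementwise identity max(x, 0) == relu(x), so a `tf.Maximum` against a zero constant can be rewritten to `tf.Relu`. A quick numpy check of that identity:

```python
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
relu = np.where(x > 0, x, 0.0)  # what tf.Relu computes
# max(x, 0) and relu(x) agree everywhere, which justifies the rewrite.
assert np.array_equal(np.maximum(x, 0.0), relu)
```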
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
        dilations: Sequence[int] = (1, 1, 1, 1),
        padding: str = 'SAME',
    ):
      class DepthwiseConvModel(module.Module):
        """A simple model with a single depthwise conv2d, bias and relu."""

        def __init__(self):
          self.out_channel_size = filter_shape[2] * filter_shape[3]
          # This ensures filters will have different value range per out channel
          self.filters = np.stack(

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0)
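Unlike the plain conv model above, depthwise filters are laid out [h, w, in_channels, channel_multiplier], which is why this test computes out_channel_size as filter_shape[2] * filter_shape[3]. A short illustrative check (shapes are arbitrary):

```python
import tensorflow as tf

# 3 input channels with channel multiplier 2 -> 3 * 2 = 6 output channels.
x = tf.random.normal([1, 8, 8, 3])
filters = tf.random.normal([2, 3, 3, 2])  # [h, w, in_channels, multiplier]
y = tf.nn.depthwise_conv2d(x, filters, strides=[1, 1, 1, 1], padding='SAME')
print(y.shape)  # (1, 8, 8, 6)
```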
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
%1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> return %1 : tensor<1x3x2x2xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
      // CHECK-LABEL:cos
      // CHECK: "tfl.cos"(%arg0) : (tensor<f32>) -> tensor<f32>
    }

    func.func @elu(%arg0: tensor<11x16xf32>) -> tensor<11x16xf32> {
      %0 = "tf.Elu"(%arg0) : (tensor<11x16xf32>) -> tensor<11x16xf32>
      func.return %0 : tensor<11x16xf32>

      // CHECK-LABEL:elu
      // CHECK: "tfl.elu"(%arg0) : (tensor<11x16xf32>) -> tensor<11x16xf32>
    }

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0)
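`tf.Elu`, which this test legalizes one-to-one to `tfl.elu`, computes x for x > 0 and exp(x) - 1 otherwise (alpha is fixed at 1 in the TF op). A reference implementation in numpy:

```python
import numpy as np

def elu(x):
    """ELU with alpha = 1, matching tf.Elu: x if x > 0 else exp(x) - 1."""
    return np.where(x > 0, x, np.exp(x) - 1.0)

x = np.array([-2.0, 0.0, 3.0], dtype=np.float32)
print(elu(x))  # approx. [-0.8646647  0.  3.]
```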
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
        llvm::StringRef str, flatbuffers::FlatBufferBuilder* builder) {
      return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
          .Case("NONE", tflite::ActivationFunctionType_NONE)
          .Case("RELU", tflite::ActivationFunctionType_RELU)
          .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
          .Case("RELU6", tflite::ActivationFunctionType_RELU6)
          .Case("TANH", tflite::ActivationFunctionType_TANH)

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes - Viewed (0)
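The `llvm::StringSwitch` above is a chained name-to-enum lookup. The same mapping sketched as a plain Python dict (the numeric values below are illustrative stand-ins; the real constants come from the generated tflite schema):

```python
# Hypothetical stand-ins for the generated tflite ActivationFunctionType enum.
ACTIVATION_FUNCTION_TYPE = {
    'NONE': 0,
    'RELU': 1,
    'RELU_N1_TO_1': 2,
    'RELU6': 3,
    'TANH': 4,
}

def convert_activation(name):
    """Name-to-enum lookup mirroring the StringSwitch in flatbuffer_operator.cc."""
    return ACTIVATION_FUNCTION_TYPE[name]
```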
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir
      func.return %2 : tensor<4x8xf32>
    }

    //===----------------------------------------------------------------------===//
    // Relu op legalizations.
    //===----------------------------------------------------------------------===//

    // -----

    // CHECK-LABEL: func @relu
    func.func @relu(%arg0: tensor<1xi32>) -> tensor<1xi32> {
      // CHECK: %[[ZERO:.*]] = mhlo.constant dense<0> : tensor<i32>

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 06 18:46:23 UTC 2024 - 335.5K bytes - Viewed (0)
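Since MHLO has no dedicated relu op, the tf2xla lowering expands `tf.Relu` into a zero constant plus an elementwise maximum, as the CHECK line above begins to show. The lowered computation in numpy terms:

```python
import numpy as np

def relu_lowered(x):
    """tf.Relu(x) as the lowering emits it: a zero constant plus a maximum."""
    zero = np.zeros((), dtype=x.dtype)  # the mhlo.constant dense<0>
    return np.maximum(x, zero)          # the elementwise mhlo.maximum

print(relu_lowered(np.array([-3, 0, 5], dtype=np.int32)))  # [0 0 5]
```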
tensorflow/compiler/mlir/tfr/python/tfr_gen_test.py
      y = _tfr_quant_raw_data(x)
      s, z = _tfr_quant_qparam(x)
      s = _tfr_quant_scale_factor(1.0, [s, s])
      s = _tfr_quant_scale_factor(1.0, [s])
      y = math_ops.Sub(y, z)
      qmin, qmax = _tfr_quant_act_range('RELU', 1.0, 0)
      (qmin, qmax)  # pylint: disable=pointless-statement
      d = _tfr_quant_rescale(y, s, 0)
      e = math_ops.Cast(x=d, DstT=dtypes.int16)
      f = math_ops.Cast(x=e, DstT=dtypes.int8)
      return f
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 28.8K bytes - Viewed (0)
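The traced function above walks a typical requantization pipeline: take raw quantized data, subtract the zero point, fold the scales into one factor, rescale into the new domain, then narrow the integer type. A hedged numpy sketch of that flow (the helper below is illustrative; the `_tfr_quant_*` intrinsics are composed differently):

```python
import numpy as np

def requantize(x_q, in_scale, in_zero_point, out_scale, out_zero_point=0):
    """Requantize integer values from one affine scheme to another."""
    y = x_q.astype(np.int32) - in_zero_point   # math_ops.Sub(y, z)
    factor = in_scale / out_scale              # _tfr_quant_scale_factor
    d = np.round(y * factor) + out_zero_point  # _tfr_quant_rescale
    e = d.astype(np.int16)                     # Cast to int16 first...
    return e.astype(np.int8)                   # ...then narrow to int8

q = np.array([-20, 0, 40, 100], dtype=np.int8)
print(requantize(q, in_scale=0.5, in_zero_point=10, out_scale=1.0))
```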