Results 51 - 60 of 339 for relu (0.07 sec)
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert-tf-quant-types.mlir
// CHECK-LABEL: func @relu_qint8
func.func @relu_qint8(%arg0: tensor<1x!tf_type.qint8>) -> tensor<1x!tf_type.qint8> {
  // CHECK: %[[X:.*]] = "tf.Relu"(%arg0) : (tensor<1xi8>) -> tensor<1xi8>
  %0 = "tf.Relu"(%arg0) : (tensor<1x!tf_type.qint8>) -> tensor<1x!tf_type.qint8>
  func.return %0 : tensor<1x!tf_type.qint8>
}
// -----
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 25.9K bytes
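For context, the test above checks that the type-conversion pass rewrites the !tf_type.qint8 storage type to plain i8 while leaving the tf.Relu in place. On the raw i8 values that op is just an elementwise max with 0, as in this minimal NumPy sketch (assuming, for illustration, a zero point of 0 so the integer clamp matches the float-domain ReLU; this is not the pass itself):

```python
import numpy as np

def relu_i8(x: np.ndarray) -> np.ndarray:
    # ReLU on the raw int8 storage values, as after the qint8 -> i8 rewrite.
    # Assumes a zero point of 0 (illustrative only).
    return np.maximum(x, np.int8(0))

print(relu_i8(np.array([-3, 0, 5], dtype=np.int8)))  # [0 0 5]
```

With a nonzero zero point the clamp would sit at the zero point instead of at 0; see the merge-fusion-with-dequantize result further down.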
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
%0 = stablehlo.constant dense<2.000000e+00> : tensor<4x2x3x3xf32>  // weight
%1 = stablehlo.constant dense<3.000000e+00> : tensor<4xf32>  // bias
%2 = stablehlo.constant dense<0.000000e+00> : tensor<1x4x5x5xf32>  // relu
%3 = stablehlo.broadcast_in_dim %1, dims = [1] : (tensor<4xf32>) -> tensor<1x4x5x5xf32>
Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes
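The one non-obvious step in that snippet is the broadcast_in_dim: dims = [1] maps the 1-D bias onto axis 1, the channel axis of the NCHW output. A small NumPy equivalent (shapes taken from the snippet; everything else is illustrative):

```python
import numpy as np

b = np.full((4,), 3.0, dtype=np.float32)         # tensor<4xf32> bias constant
b_nchw = np.broadcast_to(b.reshape(1, 4, 1, 1),  # dims = [1]: bias -> channel axis
                         (1, 4, 5, 5))           # -> tensor<1x4x5x5xf32>
print(b_nchw.shape)  # (1, 4, 5, 5)
```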
tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir
Last Modified: Thu May 02 09:41:17 UTC 2024 - 74.9K bytes
tensorflow/compiler/mlir/lite/schema/schema.fbs
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
RELU6 = 21,
RESHAPE = 22,
RESIZE_BILINEAR = 23,
RNN = 24,
SOFTMAX = 25,
SPACE_TO_DEPTH = 26,
Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes
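Reference semantics of the three ReLU variants named in this enum, as a small NumPy sketch (the definitions are standard; the Python function names are ours):

```python
import numpy as np

def relu(x):         return np.maximum(x, 0.0)   # RELU = 19: clamp below at 0
def relu_n1_to_1(x): return np.clip(x, -1.0, 1.0)  # RELU_N1_TO_1 = 20: clamp to [-1, 1]
def relu6(x):        return np.clip(x, 0.0, 6.0)   # RELU6 = 21: clamp to [0, 6]

x = np.array([-2.0, -0.5, 0.5, 7.0])
print(relu(x), relu_n1_to_1(x), relu6(x))
```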
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes
tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir
%2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32> %3 = "tfl.conv_2d"(%0, %1, %2) { dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : ( tensor<?x5x5x2xf32>, tensor<3x5x5x2xf32>, tensor<3xf32>) -> tensor<?x1x1x3xf32> %4 = "quantfork.stats"(%3) {
Last Modified: Thu May 02 09:41:17 UTC 2024 - 15.1K bytes
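In tfl.conv_2d, fused_activation_function = "RELU" means the activation is applied to the convolution output inside the same op rather than as a separate graph node. A hedged NumPy sketch of that tail (illustrative, not the TFLite kernel):

```python
import numpy as np

def apply_fused_activation(y: np.ndarray, fused_activation_function: str) -> np.ndarray:
    """Apply the named activation to a conv output, mimicking TFLite's
    fused-activation attribute. Sketch only, not the runtime implementation."""
    if fused_activation_function == "NONE":
        return y
    if fused_activation_function == "RELU":
        return np.maximum(y, 0.0)
    if fused_activation_function == "RELU6":
        return np.clip(y, 0.0, 6.0)
    if fused_activation_function == "RELU_N1_TO_1":
        return np.clip(y, -1.0, 1.0)
    raise ValueError(f"unsupported activation: {fused_activation_function}")
```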
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-merge-fusion-with-dequantize -split-input-file -verify-diagnostics | FileCheck %s

// Merge fusion with dequantize for relu case.
module attributes {tf_saved_model.semantics} {
  // CHECK-LABEL: func.func private @merge_relu_fusion
  func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
Last Modified: Thu Apr 04 23:45:53 UTC 2024 - 14K bytes
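The relu can be folded into the quantized fusion ahead of the dequantize because, for positive scale, relu(dequantize(q)) equals dequantize(max(q, zero_point)). A NumPy check of that identity (the scale and zero point here are arbitrary illustrative values):

```python
import numpy as np

# For scale > 0: relu(scale * (q - zp)) == scale * (max(q, zp) - zp), so the
# clamp can be applied in the integer domain before dequantizing.
scale, zp = 0.05, -3                       # arbitrary example parameters
q = np.random.randint(-128, 128, size=100).astype(np.int32)

dequantize = lambda v: scale * (v - zp)
lhs = np.maximum(dequantize(q), 0.0)       # dequantize, then relu
rhs = dequantize(np.maximum(q, zp))        # clamp at the zero point, then dequantize
assert np.allclose(lhs, rhs)
```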
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir
data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 9.99999974E-5 : f32, explicit_paddings = [], filter_format = "HWIO", fused_ops = ["BiasAdd", "Relu"], leakyrelu_alpha = 2.000000e-01 : f32, num_args = 2 : i64, operandSegmentSizes = array<i32: 1, 1, 2, 2>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true
Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 15.8K bytes
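Those attributes belong to a fused conv op with fused_ops = ["BiasAdd", "Relu"]. A hedged TensorFlow sketch of the unfused reference computation such a kernel is expected to match (the tensor shapes are illustrative assumptions):

```python
import tensorflow as tf

# Unfused reference for fused_ops = ["BiasAdd", "Relu"]:
# conv2d, then bias_add, then relu, all on NHWC data with an HWIO filter.
x = tf.random.normal([1, 8, 8, 2])   # NHWC input (assumed shape)
w = tf.random.normal([3, 3, 2, 4])   # HWIO filter (assumed shape)
b = tf.random.normal([4])            # per-channel bias

y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
y = tf.nn.bias_add(y, b)
y = tf.nn.relu(y)
```

Note that leakyrelu_alpha appears in the attribute list but only matters when the fused op list names LeakyRelu; with ["BiasAdd", "Relu"] it is ignored.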
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
    padding: str = 'SAME',
    has_func_alias: bool = False,
) -> module.Module:
  class ConvModel(module.Module):
    """A simple model with a single conv2d, bias and relu."""

    def __init__(self):
      self.out_channel_size = filter_shape[-1]

      # This ensures filters will have different value range per out channel
      self.filters = np.stack(
          [
Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes
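The snippet above is truncated mid-expression. As a self-contained stand-in for the kind of model that test base builds (a single conv2d with bias and relu), here is a hedged sketch; SimpleConvModel, its shapes, and its initialization are our assumptions, not the test's actual code:

```python
import numpy as np
import tensorflow as tf

class SimpleConvModel(tf.Module):
  """A single conv2d with bias and relu, mirroring the docstring above."""

  def __init__(self, filter_shape=(3, 3, 2, 4)):
    # HWIO filter; random values are a placeholder for the test's
    # per-channel-varied filters.
    self.filters = tf.Variable(
        np.random.uniform(-1.0, 1.0, size=filter_shape).astype(np.float32))
    self.bias = tf.Variable(np.zeros(filter_shape[-1], dtype=np.float32))

  @tf.function
  def __call__(self, x):
    y = tf.nn.conv2d(x, self.filters, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(y, self.bias))

m = SimpleConvModel()
print(m(tf.random.normal([1, 5, 5, 2])).shape)  # (1, 5, 5, 4)
```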
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
range for the fused activation `act` with the quantization defined by the
`scale` and `zero point`. Currently, the allowed activations are `NONE`,
`RELU`, `RELU6` and `RELU_N1_TO_1`.

Example:

```mlir
%3, %4 = tfr.quant_act_range(%2, %1, %0) : (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
```
}];

let arguments = (ins
Last Modified: Mon Apr 22 10:54:29 UTC 2024 - 17.4K bytes
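Given that description, a hedged Python sketch of the ranges the op describes: the clamp bounds in the quantized domain for each allowed activation, derived from the output scale and zero point. The helper name, its int8 defaults, and the rounding choice are our assumptions, not the TFR implementation:

```python
def quant_act_range(act: str, scale: float, zero_point: int,
                    qmin: int = -128, qmax: int = 127):
  """Clamp bounds in the quantized domain for a fused activation.

  Sketch only: maps the float-domain activation bounds through the affine
  quantization q = round(f / scale) + zero_point, then intersects with the
  storage range [qmin, qmax].
  """
  quantize = lambda f: round(f / scale) + zero_point
  if act == "NONE":
    lo, hi = qmin, qmax
  elif act == "RELU":
    lo, hi = quantize(0.0), qmax
  elif act == "RELU6":
    lo, hi = quantize(0.0), quantize(6.0)
  elif act == "RELU_N1_TO_1":
    lo, hi = quantize(-1.0), quantize(1.0)
  else:
    raise ValueError(f"unsupported activation: {act}")
  return max(lo, qmin), min(hi, qmax)

print(quant_act_range("RELU6", 0.05, -3))  # (-3, 117)
```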