Results 51 - 60 of 79 for RELU (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

      %0 = stablehlo.constant dense<2.000000e+00> : tensor<4x2x3x3xf32>  // weight
      %1 = stablehlo.constant dense<3.000000e+00> : tensor<4xf32>  // bias
      %2 = stablehlo.constant dense<0.000000e+00> : tensor<1x4x5x5xf32>  // relu
      %3 = stablehlo.broadcast_in_dim %1, dims = [1] : (tensor<4xf32>) -> tensor<1x4x5x5xf32>
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
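    With dims = [1], stablehlo.broadcast_in_dim maps the 4-element bias onto dimension 1 of the 1x4x5x5 result, i.e. the channel dimension of an NCHW tensor. A minimal NumPy sketch of the same broadcast (an illustration only, not the StableHLO implementation):

      import numpy as np

      bias = np.full((4,), 3.0, dtype=np.float32)  # %1 above: dense<3.0> : tensor<4xf32>
      # dims = [1]: the bias index maps to result dimension 1 (the C in NCHW),
      # so every (n, h, w) position sees the same per-channel value.
      broadcast = np.broadcast_to(bias.reshape(1, 4, 1, 1), (1, 4, 5, 5))
      assert broadcast.shape == (1, 4, 5, 5)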
  2. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
      RELU_N1_TO_1 = 20,
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
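    The note above explains the rename from RELU1 to RELU_N1_TO_1: under the usual TFLite semantics RELU_N1_TO_1 clamps values to [-1, 1], while RELU only zeroes negatives and RELU6 clamps to [0, 6]. A minimal sketch of the three activations, assuming plain NumPy for illustration:

      import numpy as np

      def relu(x):          # RELU = 19: lower bound 0, no upper bound
          return np.maximum(x, 0.0)

      def relu6(x):         # RELU6: clamp to [0, 6]
          return np.clip(x, 0.0, 6.0)

      def relu_n1_to_1(x):  # RELU_N1_TO_1 = 20: clamp to [-1, 1]
          return np.clip(x, -1.0, 1.0)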
  3. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
      %3 = "tfl.conv_2d"(%0, %1, %2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
        fused_activation_function = "RELU", padding = "VALID",
        stride_h = 1 : i32, stride_w = 1 : i32} : (
          tensor<?x5x5x2xf32>, tensor<3x5x5x2xf32>, tensor<3xf32>) -> tensor<?x1x1x3xf32>
      %4 = "quantfork.stats"(%3) {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir

    // CHECK: return %[[MAX]] : tensor<?x28x28x16xf32>
    // CHECK: }
    
    // -----
    
    // Because the operand of shape_of is not the target conv, this should
    // not match the conv-relu dynamic pattern.
    
    // CHECK-LABEL: @conv_with_relu_dynamic_shape_not_same_op_fn(
    // CHECK-SAME:                    %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 49.8K bytes
  5. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir

        data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 9.99999974E-5 : f32, explicit_paddings = [], filter_format = "HWIO", fused_ops = ["BiasAdd", "Relu"], leakyrelu_alpha = 2.000000e-01 : f32, num_args = 2 : i64, operandSegmentSizes = array<i32: 1, 1, 2, 2>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 15.8K bytes
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %3 = "tfl.reshape"(%1, %2) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x128x128xf32>, tensor<2xi32>) -> tensor<128x128xf32>
      %4 = "tfl.relu"(%3) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<128x128xf32>) -> tensor<128x128xf32>
      %5 = "tfl.pseudo_const"() {value = dense<[1, 128, 128]> : tensor<3xi32>} : () -> tensor<3xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-merge-fusion-with-dequantize -split-input-file -verify-diagnostics | FileCheck %s
    
    // Merge fusion with dequantize for relu case.
    
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_relu_fusion
      func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    - Last Modified: Thu Apr 04 23:45:53 UTC 2024
    - 14K bytes
  8. tensorflow/compiler/mlir/lite/schema/schema.fbs

      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
      RELU_N1_TO_1 = 20,
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

          padding: str = 'SAME',
          has_func_alias: bool = False,
      ) -> module.Module:
        class ConvModel(module.Module):
          """A simple model with a single conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[-1]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
                [
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
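    The snippet shows only the constructor of ConvModel; its docstring describes a single conv2d followed by bias add and relu. A hedged sketch of that pattern as a standalone tf.Module (names, shapes, and initialisation here are assumptions for illustration, not the actual test code):

      import numpy as np
      import tensorflow as tf

      class ConvBiasReluModel(tf.Module):  # hypothetical stand-in for ConvModel
        def __init__(self, filter_shape=(3, 3, 4, 8)):
          # Random filters and bias; the real test stacks filters so that each
          # output channel has a different value range.
          self.filters = tf.constant(
              np.random.uniform(-1.0, 1.0, size=filter_shape), dtype=tf.float32)
          self.bias = tf.constant(
              np.random.uniform(-1.0, 1.0, size=(filter_shape[-1],)), dtype=tf.float32)

        @tf.function
        def __call__(self, x):  # x: [batch, height, width, in_channels]
          y = tf.nn.conv2d(x, self.filters, strides=[1, 1, 1, 1], padding='SAME')
          y = tf.nn.bias_add(y, self.bias)
          return tf.nn.relu(y)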
  10. tensorflow/compiler/mlir/tfr/ir/tfr_ops.td

       range for the fused activation `act` with the quantization defined by the
       `scale` and `zero point`. Currently, the allowed activations are
       `NONE`, `RELU`, `RELU6` and `RELU_N1_TO_1`.
    
        Example:
    
        ```mlir
        %3, %4 = tfr.quant_act_range(%2, %1, %0) :
            (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
        ```
      }];
    
    - Last Modified: Mon Apr 22 10:54:29 UTC 2024
    - 17.4K bytes
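    As the description says, tfr.quant_act_range yields the clamping range implied by the fused activation together with the quantization scale and zero point. A rough Python illustration of that idea, assuming the common affine convention q = round(x / scale) + zero_point (a sketch of the concept, not the op's actual implementation):

      def activation_range(act, scale, zero_point, qmin=-128, qmax=127):
          """Quantized clamping bounds implied by a fused activation."""
          def quantize(x):
              return int(round(x / scale)) + zero_point
          if act == 'NONE':
              return qmin, qmax
          if act == 'RELU':
              return max(qmin, quantize(0.0)), qmax
          if act == 'RELU6':
              return max(qmin, quantize(0.0)), min(qmax, quantize(6.0))
          if act == 'RELU_N1_TO_1':
              return max(qmin, quantize(-1.0)), min(qmax, quantize(1.0))
          raise ValueError(f'unsupported activation: {act}')

      # Example: scale = 0.05, zero_point = -10 gives (-10, 110) for RELU6.
      print(activation_range('RELU6', 0.05, -10))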