Results 11 - 20 of 51 for _relu6_ (0.32 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc

      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6",  per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Tue Jun 11 06:11:34 UTC 2024
    - 6K bytes
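    The fused_activation_function = "RELU6" attribute means the runtime clamps the op's result to [0, 6] instead of emitting a separate activation op. A minimal NumPy sketch of that semantics (an illustration, not TFLite's implementation):

      import numpy as np

      def add_fused_relu6(a, b):
          # Semantics of tfl.add with fused_activation_function = "RELU6":
          # elementwise add, then clamp the result to [0, 6].
          return np.clip(a + b, 0.0, 6.0)

      # Values below 0 and above 6 are clamped away.
      print(add_fused_relu6(np.array([-3.0, 2.0, 5.0]),
                            np.array([1.0, 1.0, 4.0])))  # -> [0. 3. 6.]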
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

              'input_shape_dynamic': False,
              'enable_per_channel_quantization': False,
              'dilations': [1, 2, 2, 1],
          },
          {
              'testcase_name': 'relu6',
              'activation_fn': nn_ops.relu6,
              'has_bias': False,
              'has_batch_norm': False,
              'target_opset': quant_opts_pb2.TF,
              'input_shape_dynamic': False,
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
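    Each testcase_name entry parametrizes a model-building helper with a different activation function. A sketch of the kind of conv model such a case exercises; make_conv_model is a hypothetical name for illustration, not the test's actual helper:

      import tensorflow as tf

      def make_conv_model(activation_fn=tf.nn.relu6, has_bias=False):
          # Hypothetical helper (not the test's actual code): a 2D conv with
          # the dilations from the params above, an optional bias, and the
          # parametrized activation.
          @tf.function(input_signature=[tf.TensorSpec([1, 4, 4, 3], tf.float32)])
          def model(x):
              filters = tf.ones([2, 2, 3, 2])
              y = tf.nn.conv2d(x, filters, strides=[1, 1, 1, 1],
                               padding='SAME', dilations=[1, 2, 2, 1])
              if has_bias:
                  y = tf.nn.bias_add(y, tf.zeros([2]))
              return activation_fn(y)
          return model

      model = make_conv_model(activation_fn=tf.nn.relu6)
      print(model(tf.ones([1, 4, 4, 3])).shape)  # (1, 4, 4, 2)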
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir

        %1 = "tf.BiasAdd"(%0, %cst) {data_format = "NHWC", device = ""} : (tensor<1x2x2x3xf32>, tensor<3xf32>) -> tensor<1x2x2x3xf32>
        %2 = "tf.Relu6"(%1) {device = ""} : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
        %3 = "tf.Identity"(%2) {device = ""} : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
        return %3 : tensor<1x2x2x3xf32>
      }
    }
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.3K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu6"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
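    The fused_ops = ["BiasAdd", "Relu6"] attribute records which unfused ops the matcher folded into the single tf._FusedConv2D. A sketch of that pre-fusion pattern with public TF ops, using the shapes from the CHECK line:

      import tensorflow as tf

      def conv_bias_relu6(x, filters, bias):
          # The unfused three-op pattern that fused_kernel_matcher rewrites
          # into one tf._FusedConv2D with fused_ops = ["BiasAdd", "Relu6"].
          y = tf.nn.conv2d(x, filters, strides=[1, 1, 1, 1], padding='SAME')
          y = tf.nn.bias_add(y, bias)   # BiasAdd
          return tf.nn.relu6(y)         # Relu6

      x = tf.ones([8, 32, 32, 3])
      out = conv_bias_relu6(x, tf.ones([1, 1, 3, 128]), tf.zeros([128]))
      print(out.shape)  # (8, 32, 32, 128)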
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %1 = "tfl.mul"(%0, %arg2) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

        )
    
        if merge_fusion_with_dequantize:
          # Check that the activation functions are explicitly present.
          # If present, the last op before the return should be stablehlo.clamp
          # for relu6 and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
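    The asymmetry this test asserts follows from the ops' definitions: relu6 has both a lower and an upper bound, so it lowers to stablehlo.clamp, while relu has only a lower bound and lowers to stablehlo.maximum. In NumPy terms:

      import numpy as np

      x = np.array([-2.0, 3.0, 9.0])

      # relu6 = clamp(0, x, 6): two bounds, hence stablehlo.clamp.
      relu6 = np.minimum(np.maximum(x, 0.0), 6.0)   # -> [0. 3. 6.]

      # relu = maximum(x, 0): one bound, hence stablehlo.maximum.
      relu = np.maximum(x, 0.0)                     # -> [0. 3. 9.]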
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

        %1 = "tf.Relu6"(%0) : (tensor<*xf32>) -> tensor<*xf32>
        func.return %1 : tensor<*xf32>
      }
    }
    
    // CalibrationOptions(calibration_method=CALIBRATION_METHOD_MIN_MAX)
    // MIN-MAX-CHECK: func @wrap_composite_func
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
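    The custom aggregation ops inserted by this pass feed the calibrator; under CALIBRATION_METHOD_MIN_MAX the collected statistic is just the running minimum and maximum of each observed tensor. A generic sketch of min-max calibration (an illustration, not the pass's implementation):

      import numpy as np

      class MinMaxCalibrator:
          # Tracks the running min and max over every batch seen, the
          # statistic behind CALIBRATION_METHOD_MIN_MAX.
          def __init__(self):
              self.min = np.inf
              self.max = -np.inf

          def observe(self, batch):
              self.min = min(self.min, float(np.min(batch)))
              self.max = max(self.max, float(np.max(batch)))

      calib = MinMaxCalibrator()
      calib.observe(np.array([-1.5, 0.0, 4.2]))
      calib.observe(np.array([2.0, 7.7]))
      print(calib.min, calib.max)  # -1.5 7.7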
  8. tensorflow/compiler/mlir/lite/tests/ops.mlir

      %0 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6"} : tensor<? x i16>
      func.return %0#0 : tensor<? x i16>
    }
    
    // CHECK-LABEL: testSub
    func.func @testSub(tensor<? x i32>, tensor<? x i32>) -> tensor<? x i32> {
    ^bb0(%arg0: tensor<? x i32>, %arg1: tensor<? x i32>):
      // CHECK: tfl.sub %arg0, %arg1 {fused_activation_function = "RELU6"}
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "!tf_type.qint8"},
        ]
        func.func @GenerateQuantizedFunctionName(${quantized_ops})(%input : tensor<*x!tf_type.qint8>,
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
  10. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %relu_attr = tfr.constant "RELU" -> !tfr.attr
      %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
      %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
      %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
      %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
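    tfr.quant_act_range maps a fused-activation attribute plus an output scale and zero point to a clamp range in the quantized domain. For "RELU6", the usual recipe (as in TFLite's kernel utilities) quantizes the float bounds 0.0 and 6.0; a hedged sketch, where the exact rounding used by tfr may differ:

      def quant_act_range_relu6(scale, zero_point, qmin=-128, qmax=127):
          # Quantized clamp range for a fused RELU6: quantize the float
          # bounds 0.0 and 6.0 with q = round(x / scale) + zero_point, then
          # intersect with the type's representable range. This mirrors the
          # standard TFLite recipe; it is a sketch, not tfr's implementation.
          act_min = max(qmin, zero_point + round(0.0 / scale))
          act_max = min(qmax, zero_point + round(6.0 / scale))
          return act_min, act_max

      # Example: scale 0.1, zero point -10 -> clamp to [-10, 50].
      print(quant_act_range_relu6(0.1, -10))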