Results 31 - 40 of 138 for relu6 (0.11 sec)

  1. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %relu_attr = tfr.constant "RELU" -> !tfr.attr
      %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
      %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
      %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
      %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
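    The tfr.quant_act_range op above derives the clamp range implied by a
    fused-activation attribute so the quantizer can tighten its output limits.
    A minimal Python sketch of that mapping, assuming standard affine
    quantization (q = round(x / scale) + zero_point) clamped to int8; the
    helper name is illustrative, not the TFR API:

      def act_range_i8(act, scale, zero_point):
          """Quantized (min, max) implied by a fused activation, clamped to int8."""
          float_ranges = {
              "NONE": (float("-inf"), float("inf")),
              "RELU": (0.0, float("inf")),
              "RELU6": (0.0, 6.0),
              "RELU_N1_TO_1": (-1.0, 1.0),
          }
          lo, hi = float_ranges[act]

          def q(x):
              if x == float("-inf"):
                  return -128
              if x == float("inf"):
                  return 127
              return max(-128, min(127, round(x / scale) + zero_point))

          return q(lo), q(hi)

      print(act_range_i8("RELU6", scale=6.0 / 255.0, zero_point=-128))  # (-128, 127)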
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

        %1 = "tf.Relu6"(%0) : (tensor<*xf32>) -> tensor<*xf32>
        func.return %1 : tensor<*xf32>
      }
    }
    
    // CalibrationOptions(calibration_method=CALIBRATION_METHOD_MIN_MAX)
    // MIN-MAX-CHECK: func @wrap_composite_func
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
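    The pass above wraps tensors such as the tf.Relu6 output in aggregation ops
    that record statistics during calibration; with
    CalibrationOptions(calibration_method=CALIBRATION_METHOD_MIN_MAX) those
    statistics are just a running minimum and maximum. A rough Python sketch of
    that bookkeeping (assumed semantics; the class and method names are
    illustrative, not the op's actual interface):

      class MinMaxAggregator:
          """Pass-through observer that tracks the running min/max of a tensor."""

          def __init__(self):
              self.min = float("inf")
              self.max = float("-inf")

          def aggregate(self, values):
              self.min = min(self.min, min(values))
              self.max = max(self.max, max(values))
              return values  # the wrapped value itself is unchanged

      agg = MinMaxAggregator()
      agg.aggregate([0.0, 2.5, 6.0])  # e.g. outputs of the tf.Relu6 above
      print(agg.min, agg.max)         # 0.0 6.0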
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  4. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    // Fusing:  %[[relu:[0-9].*]] = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
    // Fusing:  %[[add2:[0-9].*]] = tfl.add %[[relu]], %[[add1]] {fused_activation_function = "RELU6"} : tensor<1xf32>
    // Fusing:  %[[add3:[0-9].*]] = tfl.add %[[add2]], %[[relu]] {fused_activation_function = "RELU6"} : tensor<1xf32>
    // Fusing:  return
    
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
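    In the fusing check above, fused_activation_function = "RELU6" means the
    add's result is clamped to [0, 6] inside the same kernel instead of being
    emitted as a separate tfl.relu6 op. The equivalent computation written out
    in TensorFlow eager ops (illustration only, not the converter's code):

      import tensorflow as tf

      a = tf.constant([1.0])
      b = tf.constant([7.5])
      fused = tf.clip_by_value(a + b, 0.0, 6.0)  # add with RELU6 fused in
      unfused = tf.nn.relu6(a + b)               # separate add, then relu6
      print(fused.numpy(), unfused.numpy())      # both print [6.]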
  5. tensorflow/compiler/mlir/tfr/examples/mnist/mnist_ops_test.py

            'input_': input_,
            'filter_': filter_,
            'bias': bias,
            'stride_w': 2,
            'stride_h': 2,
            'dilation_w': 1,
            'dilation_h': 1,
            'padding': 'SAME',
            'act': 'RELU6'
        }
    
        self._assertOpAndComposite([input_, filter_, bias],
                                   tf.function(gen_mnist_ops.new_conv2d),
                                   ops_defs._composite_conv_add_relu, kwargs)
    
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 4K bytes
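    The test feeds those kwargs to gen_mnist_ops.new_conv2d and compares it
    against the _composite_conv_add_relu definition. A rough TensorFlow
    equivalent of what the arguments describe, inferred from the kwargs above
    (the real composite is defined in the example's ops_defs.py):

      import tensorflow as tf

      def conv_add_relu6(input_, filter_, bias):
          y = tf.nn.conv2d(input_, filter_, strides=[1, 2, 2, 1],
                           dilations=[1, 1, 1, 1], padding='SAME')  # stride 2, dilation 1
          y = tf.nn.bias_add(y, bias)
          return tf.nn.relu6(y)  # act='RELU6'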
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir

        %6 = "quantfork.stats"(%5) {layerStats = dense<[-2788.31055, 4616.62842]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32>
        %7 = "tf.Relu6"(%6) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
        %8 = "quantfork.stats"(%7) {layerStats = dense<[0.000000e+00, 6.000000e+00]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32>
        return %8 : tensor<*xf32>
      }
    
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 4.2K bytes
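    Note how the quantfork.stats change across the tf.Relu6 above: the
    pre-activation range [-2788.31055, 4616.62842] collapses to [0, 6], because
    relu6 clamps every value into that interval. A one-line check:

      relu6 = lambda x: max(0.0, min(6.0, x))
      print(relu6(-2788.31055), relu6(4616.62842))  # 0.0 6.0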
  7. tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td

    // These should match the ActivationFunctionType enum in TFLite schema.
    def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
    def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
    def TFL_AFEnum_Relu1 : I32EnumAttrCase<"RELU_N1_TO_1", 2>;
    def TFL_AFEnum_Relu6 : I32EnumAttrCase<"RELU6", 3>;
    def TFL_AFEnum_Tanh  : I32EnumAttrCase<"TANH", 4>;
    def TFL_AFEnum_Sign  : I32EnumAttrCase<"SIGN_BIT", 5>;
    
    def TFL_AFAttr : TFL_AnyStrAttrOf<[
    - Last Modified: Thu Oct 20 00:05:24 UTC 2022
    - 6.4K bytes
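    The enum cases above give the integer codes TFLite uses for fused
    activations. The same mapping, plus a reference sketch of the clamp
    semantics, in Python (the codes come from the cases above; the helper
    function is illustrative only):

      import math

      ACTIVATION_FUNCTION_TYPE = {
          "NONE": 0, "RELU": 1, "RELU_N1_TO_1": 2,
          "RELU6": 3, "TANH": 4, "SIGN_BIT": 5,
      }

      def apply_activation(name, x):
          if name == "RELU":
              return max(0.0, x)
          if name == "RELU_N1_TO_1":
              return max(-1.0, min(1.0, x))
          if name == "RELU6":
              return max(0.0, min(6.0, x))
          if name == "TANH":
              return math.tanh(x)
          return x  # NONE; SIGN_BIT is handled by the ops that use it, not as a clamp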
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

      %biasadd = "tf.BiasAdd"(%conv, %dq_bias) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
      %res = "tf.Relu6"(%biasadd) : (tensor<*xf32>) -> tensor<*xf32>
      %q_res = "quantfork.qcast"(%res) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 0.023529411764705882:-128>>
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
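    The qcast type above, !quant.uniform<i8:f32, 0.023529411764705882:-128>, is
    what an asymmetric int8 quantization of the Relu6 output range [0, 6] works
    out to. A quick check of the arithmetic:

      scale = (6.0 - 0.0) / (127 - (-128))    # 6/255 = 0.023529411764705882
      zero_point = -128 - round(0.0 / scale)  # -128
      print(scale, zero_point)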
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  10. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

    // tfl.log / tfl.logistic / tfl.max_pool_2d / tfl.mirror_pad / tfl.maximum /
    // tfl.custom / tfl.mean / tfl.minimum / tfl.pad / tfl.pow / tfl.prelu /
    // tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax /
    // tfl.space_to_depth / tfl.sqrt / tfl.square / tfl.squared_difference /
    // tfl.strided_slice / tfl.tanh / tfl.transpose / tfl.transpose_conv
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes