Results 1 - 10 of 31 for RELU (0.09 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
      %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
      %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
    
      %6 = "tf.Conv2D"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    // CHECK-SAME: f = @composite_conv3d_fn_1}>
    // CHECK-NOT: {_tfl_quant_trait = "fully_quantizable"
    // CHECK: %[[RELU:.*]] = "tf.Relu"(%[[PARTITIONEDCALL_0]])
    // CHECK: return %[[RELU]]
    
    // CHECK-LABEL: private @composite_conv3d_fn_1
    
    // WEIGHTONLY-DAG: %[[CST:.*]] = "tf.Const"() {{.*}} : () -> tensor<2x3x3x3x2xf32>
    // WEIGHTONLY: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %[[CST]])
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
  3. tensorflow/compiler/mlir/lite/tests/end2end/back2back_fake_quant.pbtxt

        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "data_format"
        value {
          s: "NHWC"
        }
      }
    }
    node {
      name: "sequential/quant_dense/Relu"
      op: "Relu"
      input: "sequential/quant_dense/BiasAdd"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
    }
    node {
    - Last Modified: Mon Nov 15 19:42:47 UTC 2021
    - 25.9K bytes
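
    The pbtxt fragment above is a serialized GraphDef node. The same node can be
    built programmatically with TensorFlow's protobuf bindings; a minimal sketch,
    with the name, op, and input copied from the excerpt:

      from tensorflow.core.framework import graph_pb2
      from tensorflow.core.framework import types_pb2

      g = graph_pb2.GraphDef()
      node = g.node.add()
      node.name = "sequential/quant_dense/Relu"
      node.op = "Relu"
      node.input.append("sequential/quant_dense/BiasAdd")
      node.attr["T"].type = types_pb2.DT_FLOAT
      print(g)  # prints the same text form as the excerpt above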
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "!tf_type.qint8"},
        ]
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
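
    Each parameter row pairs a list of fused ops with the activation function
    applied during requantization. In the integer domain, Relu amounts to
    clamping at the output zero point, because the real value 0 quantizes to
    exactly that point (Relu6 additionally caps at the quantized value of 6).
    A rough sketch of the idea with made-up scale/zero-point values;
    requantize_and_relu is an illustrative helper, not the library's internal
    function:

      import numpy as np

      def requantize_and_relu(acc, out_scale, out_zp, acc_scale=1.0):
          # Rescale the int32 accumulator into the int8 output domain.
          q = np.round(acc * (acc_scale / out_scale)) + out_zp
          # In integer arithmetic, Relu is a clamp at the zero point,
          # since the real value 0 quantizes to out_zp.
          return np.clip(q, out_zp, 127).astype(np.int8)

      acc = np.array([-200, -10, 0, 50, 400], dtype=np.int32)
      print(requantize_and_relu(acc, out_scale=2.0, out_zp=-20))
      # -> [-20 -20 -20   5 127]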
  5. tensorflow/cc/gradients/nn_grad_test.cc

      RunTest(x, x_init_value, y, shape);
    }
    
    TEST_F(NNGradTest, ReluGrad) {
      TensorShape shape({5, 2});
      auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
      auto y = Relu(scope_, x);
      // Avoid input values where ReLU gradient is not well defined (around zero).
      Tensor x_init_value = test::AsTensor<float>(
          {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
          {5, 2});
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
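
    The comment in this test flags a real numerical subtlety: ReLU has a kink at
    zero (left derivative 0, right derivative 1), so a finite-difference gradient
    check that perturbs an input across zero straddles the kink and disagrees
    with the analytic gradient. A minimal sketch with hypothetical values:

      import numpy as np

      def relu(x):
          return np.maximum(x, 0.0)

      def relu_grad(x):
          # TensorFlow's convention: the gradient at exactly 0 is taken as 0.
          return (x > 0).astype(x.dtype)

      eps = 1e-3
      x = np.array([-0.5, -eps / 2, eps / 2, 0.5])
      numeric = (relu(x + eps) - relu(x - eps)) / (2 * eps)
      print(numeric)       # ~[0, 0.25, 0.75, 1]: estimates near 0 straddle the kink
      print(relu_grad(x))  # [0, 0, 1, 1]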
  6. tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td

    // Canonicalize tf.Maximum of zero to tf.Relu
    //===----------------------------------------------------------------------===//
    
    def IsInteger32Pred: CPred<
      "getElementTypeOrSelf($0.getType()).isInteger(32)">;
    
    // Whether the transformation is compatible with the device if given.
    // Currently, Relu with int32 is not supported on GPU.
    def IsDeviceCompatible: Constraint<
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 17K bytes
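
    The rewrite rests on the elementwise identity max(x, 0) = relu(x), which
    lets later passes (fusion, quantization) pattern-match the activation
    directly. A quick check of the identity; the IsDeviceCompatible constraint
    above blocks the rewrite for int32 on GPU, where Relu is unsupported:

      import tensorflow as tf

      x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
      # tf.maximum(x, 0) and tf.nn.relu(x) agree elementwise,
      # which is what licenses the canonicalization.
      print(bool(tf.reduce_all(tf.maximum(x, 0.0) == tf.nn.relu(x))))  # True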
  7. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
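
    The fused_ops = ["BiasAdd", "Relu"] attribute records which trailing ops
    were folded into the convolution. The matcher rewrites the unfused
    Conv2D -> BiasAdd -> Relu chain into that single op; a sketch of the source
    pattern, with shapes taken from the CHECK line above:

      import tensorflow as tf

      def conv_bias_relu(x, w, b):
          # The unfused chain that the matcher rewrites into a single
          # tf._FusedConv2D with fused_ops = ["BiasAdd", "Relu"].
          y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
          y = tf.nn.bias_add(y, b, data_format="NHWC")
          return tf.nn.relu(y)

      x = tf.random.normal([8, 32, 32, 3])
      w = tf.random.normal([1, 1, 3, 128])
      b = tf.random.normal([128])
      print(conv_bias_relu(x, w, b).shape)  # (8, 32, 32, 128)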
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  9. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  10. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %none_attr = tfr.constant "NONE" -> !tfr.attr
      %relu_attr = tfr.constant "RELU" -> !tfr.attr
      %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
      %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
      %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
      %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
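
    tfr.quant_act_range maps a fused-activation attribute plus a (scale, zero
    point) pair to the integer clamp range that the quantized op must enforce.
    Roughly, for qint8: NONE keeps the full [-128, 127], RELU clamps below at
    the zero point, RELU6 caps at the quantized value of 6, and RELU_N1_TO_1
    clamps at the quantized values of -1 and 1. A sketch of that arithmetic
    (assumed semantics, simplified from the decomposition):

      def quant_act_range(act, scale, zp, qmin=-128, qmax=127):
          q = lambda real: round(real / scale) + zp  # quantize a real value
          if act == "NONE":
              return qmin, qmax
          if act == "RELU":
              return max(qmin, zp), qmax             # real 0.0 maps to zp
          if act == "RELU6":
              return max(qmin, zp), min(qmax, q(6.0))
          if act == "RELU_N1_TO_1":
              return max(qmin, q(-1.0)), min(qmax, q(1.0))
          raise ValueError(act)

      print(quant_act_range("RELU6", scale=0.1, zp=-10))  # (-10, 50)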