Results 1 - 10 of 10 for RELU (0.04 sec)

  1. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

        Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
        Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
        Node* d =
            ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
        Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
        ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          # If present, the last op before return should be stablehlo.clamp for relu6
          # and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
        else:
          # Check activation functions are implicit.
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
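
    The distinction this test asserts can be illustrated numerically. A minimal
    NumPy sketch (illustrative only, not part of the test): relu is a plain
    maximum against zero, while relu6 additionally saturates at 6, which is why
    the lowering uses stablehlo.maximum for one and stablehlo.clamp for the other.

    import numpy as np

    def relu(x):
        # Expected to lower to stablehlo.maximum.
        return np.maximum(x, 0.0)

    def relu6(x):
        # Expected to lower to stablehlo.clamp (clamped to [0, 6]).
        return np.clip(x, 0.0, 6.0)

    x = np.array([-3.0, 2.0, 8.0])
    print(relu(x))   # [0. 2. 8.]
    print(relu6(x))  # [0. 2. 6.]
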
  3. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

         (HasRankAtMost<4> $a),
         (HasRankAtMost<4> $b)]>;
    }
    
    // We can eliminate Relu from Relu(SquaredDifference(x, y)),
    // since the result of SquaredDifference is always non-negative.
    // The TFLite interpreter doesn't support Relu on int32 for now, so the test
    // cases fail unless the following pattern optimizes the Relu away.
    def OptimizeReluSquaredDifference : Pat<
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
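
    The rationale in the comment is easy to verify: a squared difference is never
    negative, so a following Relu is the identity. A small NumPy check
    (illustrative only, not part of the pattern file):

    import numpy as np

    x = np.random.randn(5).astype(np.float32)
    y = np.random.randn(5).astype(np.float32)

    sq_diff = np.square(x - y)           # SquaredDifference: always >= 0
    relu_out = np.maximum(sq_diff, 0.0)  # Relu is a no-op on non-negative input

    assert np.array_equal(relu_out, sq_diff)
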
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

        %10 = "tf.Cast"(%9) {Truncate = false, device = ""} : (tensor<1x3xi32>) -> tensor<1x3xf32>
        %11 = "tf.Mul"(%10, %cst) {device = ""} : (tensor<1x3xf32>, tensor<f32>) -> tensor<1x3xf32>
        %12 = "tf.Relu"(%11) {device = ""} : (tensor<1x3xf32>) -> tensor<1x3xf32>
        return %12 : tensor<1x3xf32>
      }
    // CHECK-LABEL: func @matmul_with_relu
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
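
    The tail of this quantized matmul casts the int32 accumulator to float,
    rescales it, and applies Relu. A rough NumPy sketch of that epilogue, with a
    made-up accumulator and scale (not taken from the test file):

    import numpy as np

    acc_i32 = np.array([[-120, 35, 460]], dtype=np.int32)  # hypothetical matmul accumulator
    scale = np.float32(0.0125)                              # hypothetical rescaling constant

    dequant = acc_i32.astype(np.float32) * scale  # tf.Cast followed by tf.Mul
    out = np.maximum(dequant, 0.0)                # tf.Relu
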
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir

    // CHECK: return %[[MAX]] : tensor<?x28x28x16xf32>
    // CHECK: }
    
    // -----
    
    // Because the operand of shape_of is something other than the target conv,
    // this should not match the conv-relu dynamic pattern.
    
    // CHECK-LABEL: @conv_with_relu_dynamic_shape_not_same_op_fn(
    // CHECK-SAME:                    %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 49.8K bytes
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %3 = "tfl.reshape"(%1, %2) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x128x128xf32>, tensor<2xi32>) -> tensor<128x128xf32>
      %4 = "tfl.relu"(%3) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<128x128xf32>) -> tensor<128x128xf32>
      %5 = "tfl.pseudo_const"() {value = dense<[1, 128, 128]> : tensor<3xi32>} : () -> tensor<3xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          dilations: Sequence[int] = (1, 1, 1, 1),
          padding: str = 'SAME',
      ):
        class DepthwiseConvModel(module.Module):
          """A simple model with a single depthwise conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[2] * filter_shape[3]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
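
    A self-contained sketch of the kind of model this test base builds, using
    standard TensorFlow ops; the class name, default shapes, and random filters
    below are illustrative, not the test's actual code:

    import tensorflow as tf

    class DepthwiseConvReluModel(tf.Module):
      """Illustrative: a single depthwise conv2d, bias add, and relu."""

      def __init__(self, filter_shape=(2, 3, 3, 1)):
        super().__init__()
        # filter_shape is [height, width, in_channels, channel_multiplier], so the
        # number of output channels is in_channels * channel_multiplier.
        self.out_channel_size = filter_shape[2] * filter_shape[3]
        self.filters = tf.random.uniform(filter_shape, minval=-1.0, maxval=1.0)
        self.bias = tf.zeros([self.out_channel_size])

      @tf.function
      def __call__(self, x):
        y = tf.nn.depthwise_conv2d(
            x, self.filters, strides=[1, 1, 1, 1], padding='SAME')
        y = tf.nn.bias_add(y, self.bias)
        return tf.nn.relu(y)
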
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

      %5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32>
      %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
  9. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

    // CHECK-PER-TENSOR: return %[[UNIFORM_QUANTIZE_0]] : tensor<?x3x4x2x!quant.uniform<i8:f32, {{.*}}>>
    
    // -----
    
    // Tests that the fused pattern for convolution + bias + relu with a
    // dynamic batch dimension is properly quantized.
    
    // Note that this checks for the same conditions as
    // quantize_conv_with_bias_dynamic_fn, omitting stablehlo.maximum.
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
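
    For context, a convolution + bias + relu function with a dynamic batch
    dimension can be written in TensorFlow by leaving the batch size unspecified
    in the input signature; the shapes and values in this sketch are invented:

    import tensorflow as tf

    filters = tf.random.uniform([3, 3, 1, 16], minval=-1.0, maxval=1.0)
    bias = tf.zeros([16])

    # None in the batch position keeps that dimension dynamic in the traced graph.
    @tf.function(input_signature=[tf.TensorSpec([None, 28, 28, 1], tf.float32)])
    def conv_bias_relu(x):
      y = tf.nn.conv2d(x, filters, strides=1, padding='SAME')
      return tf.nn.relu(tf.nn.bias_add(y, bias))
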