Results 61 - 70 of 339 for relu (0.07 sec)

  1. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

                   (BinBroadcastDimensions $one, $features))))>;
    
    //===----------------------------------------------------------------------===//
    // Relu op patterns.
    //===----------------------------------------------------------------------===//
    
    // TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
    // require HLO canonicalization of min and max on a tensor to ClampOp.
    
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
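    The TODO above notes that lowering Relu6 this way depends on HLO canonicalizing a min/max pair over a tensor into a single ClampOp. The identity involved, in a small numpy sketch (an illustration, not part of the file):

        import numpy as np

        def relu6_min_max(x):
            # Relu6 written as the min/max pair the TODO refers to.
            return np.minimum(np.maximum(x, 0.0), 6.0)

        def relu6_clamp(x):
            # The same result as a single clamp, mirroring HLO's ClampOp.
            return np.clip(x, 0.0, 6.0)

        x = np.linspace(-2.0, 8.0, 6)
        assert np.array_equal(relu6_min_max(x), relu6_clamp(x))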
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let hasFolder = 1;
    }
    
    def TFL_ReluOp: TFL_Op<"relu", [
        PredOpTrait<"x and y must have same element type",
          TFL_TCresVTEtIsSameAsOp<0, 0>>,
        Pure,
        QuantizableResult,
        SameOperandsAndResultShape]> {
      let summary = "Relu operator";
    
      let description = [{
        Element-wise Relu operator
          x -> max(0, x)
      }];
    
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
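    The traits above pin down the contract: the result keeps the operand's shape and element type, and the description gives the computation itself. A one-line reference implementation in numpy (a sketch, not the TFLite kernel):

        import numpy as np

        def relu(x):
            # Element-wise Relu from the description: x -> max(0, x).
            # Output shape equals input shape, per SameOperandsAndResultShape.
            return np.maximum(x, 0)

        print(relu(np.array([-1.5, 0.0, 2.0])))  # [0. 0. 2.]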
  3. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: %0 = "tf.Relu6"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32>
      // CHECK: return %0
      %cst_6 = arith.constant dense<6.000000e+00> : tensor<f32>
      %0 = "tf.Minimum"(%arg0, %cst_6) : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
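    The excerpt is truncated, but the FileCheck lines show a Relu/Minimum-by-6 chain being canonicalized to a single tf.Relu6. The rewrite is numerically sound, as a quick TensorFlow check (an illustration, not part of the test) confirms:

        import tensorflow as tf

        x = tf.constant([-2.0, 3.0, 9.0, 5.5])
        # Relu followed by Minimum against a splat 6 matches a single Relu6.
        unfused = tf.minimum(tf.nn.relu(x), 6.0)
        fused = tf.nn.relu6(x)
        assert bool(tf.reduce_all(tf.equal(unfused, fused)))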
  4. tensorflow/compiler/mlir/lite/tests/ops.mlir

      // CHECK: "RELU"
      %1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32>
      // CHECK: "RELU_N1_TO_1"
      %2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32>
      // CHECK: "RELU6"
      %3 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6"} : tensor<4xi32>
      // CHECK: "TANH"
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
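    Each fused_activation_function name exercised above denotes an element-wise function applied to the result of the add. A numpy sketch of the semantics (SIGN_BIT is omitted; names and clamping ranges follow the TFLite convention):

        import numpy as np

        FUSED_ACTIVATIONS = {
            "NONE": lambda x: x,
            "RELU": lambda x: np.maximum(x, 0),
            "RELU_N1_TO_1": lambda x: np.clip(x, -1, 1),
            "RELU6": lambda x: np.clip(x, 0, 6),
            "TANH": np.tanh,
        }

        def add_with_fused_activation(a, b, fn="NONE"):
            # tfl.add computes the sum first, then applies the fused activation.
            return FUSED_ACTIVATIONS[fn](a + b)

        print(add_with_fused_activation(np.array([-3, 1, 4, 9]), 1, "RELU6"))  # [0 2 5 6]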
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          dilations: Sequence[int] = (1, 1, 1, 1),
          padding: str = 'SAME',
      ):
        class DepthwiseConvModel(module.Module):
          """A simple model with a single depthwise conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[2] * filter_shape[3]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
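    The comment in the snippet explains the intent: each output channel's filter values should span a different range, so per-channel quantization produces genuinely different scales. The np.stack call is truncated above; one hypothetical way to get that effect (an assumption, not the file's actual code):

        import numpy as np

        out_channels = 8
        # Scale each channel's weights by its index so per-channel ranges differ.
        filters = np.stack(
            [np.random.uniform(-1.0, 1.0, size=(3, 3)) * (i + 1) for i in range(out_channels)],
            axis=-1,
        )
        print(filters.min(axis=(0, 1)))  # per-channel minima grow in magnitude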
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

        %1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
        return %1 : tensor<1x3x2x2xf32>
      }
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
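    The excerpt shows the tail of a composite being quantized: a tensor<1x3x2x2xf32> result passed through tf.Relu. A minimal float sketch of that kind of composite (the convolution preceding the Relu is an assumption based on the file's context):

        import tensorflow as tf

        @tf.function
        def conv_relu(x, w):
            # Convolution followed by Relu: the float pattern the pass quantizes.
            y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
            return tf.nn.relu(y)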
  7. tensorflow/compiler/mlir/lite/flatbuffer_operator.cc

      return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
          .Case("NONE", tflite::ActivationFunctionType_NONE)
          .Case("RELU", tflite::ActivationFunctionType_RELU)
          .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
          .Case("RELU6", tflite::ActivationFunctionType_RELU6)
          .Case("TANH", tflite::ActivationFunctionType_TANH)
          .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT);
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 38K bytes
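    The StringSwitch maps the textual fused_activation_function attribute onto the flatbuffer enum. A Python mirror of the same table (the numeric values assume the declaration order in TFLite's schema.fbs; verify before relying on them):

        from enum import IntEnum

        class ActivationFunctionType(IntEnum):
            # Assumed to follow the declaration order in TFLite's schema.fbs.
            NONE = 0
            RELU = 1
            RELU_N1_TO_1 = 2
            RELU6 = 3
            TANH = 4
            SIGN_BIT = 5

        def activation_from_string(s):
            # Mirrors the llvm::StringSwitch above; raises KeyError on
            # unknown names rather than falling through.
            return ActivationFunctionType[s]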
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

      %5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32>
      %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
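    The layerStats pair records the calibrated [min, max] range that post-training quantization turns into a scale and zero point. The standard asymmetric affine arithmetic, as a sketch (TFLite's actual pass also nudges the range; this omits that):

        def affine_params(rmin, rmax, qmin=-128, qmax=127):
            # The range must include zero so that 0.0 is exactly representable.
            rmin, rmax = min(rmin, 0.0), max(rmax, 0.0)
            scale = (rmax - rmin) / (qmax - qmin)
            zero_point = int(round(qmin - rmin / scale))
            return scale, zero_point

        print(affine_params(-56.2916565, 122.922478))  # ~(0.7028, -48)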
  9. src/cmd/go/internal/toolchain/toolchain_test.go

    	{"1.38.1", rel2, "go1.39.2"},
    	{"1.38.1", relRC, "go1.39.2"},
    	{"1.39", rel0, "go1.39.2"},
    	{"1.39", rel2, "go1.39.2"},
    	{"1.39", relRC, "go1.39.2"},
    	{"1.39.2", rel0, "go1.39.2"},
    	{"1.39.2", rel2, "go1.39.2"},
    	{"1.39.2", relRC, "go1.39.2"},
    	{"1.39.3", rel0, "go1.40.0"},
    	{"1.39.3", rel2, "go1.40.2"},
    	{"1.39.3", relRC, "go1.40.2"},
    	{"1.40", rel0, "go1.40.0"},
    	{"1.40", rel2, "go1.40.2"},
    - Last Modified: Tue May 30 19:11:44 UTC 2023
    - 1.8K bytes
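    These cases exercise Go's toolchain version selection. Reading the table, the rule appears to be: take the newest patch release of each release line, then pick the smallest such release that still satisfies the requested minimum. A rough Python sketch under that reading (the rel0/rel2 fixtures below are inferred from the expected outputs, not taken from the test):

        RELEASE_STATES = {
            "rel0": {"1.39": "1.39.2", "1.40": "1.40.0"},
            "rel2": {"1.39": "1.39.2", "1.40": "1.40.2"},
        }

        def _key(v):
            return tuple(int(p) for p in v.split("."))

        def pick_toolchain(minimum, state):
            # Smallest "latest patch" release that satisfies the minimum.
            for release in sorted(RELEASE_STATES[state].values(), key=_key):
                if _key(release) >= _key(minimum):
                    return "go" + release
            raise ValueError("no released toolchain satisfies " + minimum)

        assert pick_toolchain("1.38.1", "rel2") == "go1.39.2"
        assert pick_toolchain("1.39.3", "rel0") == "go1.40.0"
        assert pick_toolchain("1.39.3", "rel2") == "go1.40.2"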
  10. platforms/core-configuration/kotlin-dsl/doc/c4/lib/C4.puml

    ' ##################################
    
    !define Rel_(e_alias1, e_alias2, e_label, e_direction="") e_alias1 e_direction e_alias2 : "===e_label"
    !define Rel_(e_alias1, e_alias2, e_label, e_techn, e_direction="") e_alias1 e_direction e_alias2 : "===e_label\n//<size:TECHN_FONT_SIZE>[e_techn]</size>//"
    
    !define Rel(e_from,e_to, e_label) Rel_(e_from,e_to, e_label, "-->")
    !define Rel(e_from,e_to, e_label, e_techn) Rel_(e_from,e_to, e_label, e_techn, "-->")
    
    - Last Modified: Wed Aug 02 08:06:49 UTC 2023
    - 3.6K bytes
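    For reference, expanding the macros above: Rel(api, db, "reads") becomes api --> db : "===reads", and the four-argument form appends the technology label in a smaller italic font. The two !define lines sharing one name give the macro two arities, PlantUML-preprocessor style.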