Results 11 - 20 of 111 for RELU (0.03 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir

    // CHECK: %[[cast:.*]] = "tf.Cast"(%[[sub]]) <{Truncate = false}> : (tensor<1x3x2x2xi32>) -> tensor<1x3x2x2xf32>
    // CHECK: %[[dequant1:.*]] = "tf.Mul"(%[[cast]]
    // CHECK: %[[relu:.*]] = "tf.Relu"(%[[dequant1]]
    // CHECK: %[[clamped:.*]] = "tf.Minimum"(%[[relu]]
    
    // CHECK: %[[rescale1:.*]] = "tf.Mul"(%[[cast]]
    // CHECK: %[[add2:.*]] = "tf.AddV2"(%[[rescale1]]
    // CHECK: %[[maximum2:.*]] = "tf.Maximum"(%[[add2]]
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.2K bytes
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/compute-cost.mlir

      %1 = "tfl.mul"(%0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32>
      func.return %1 : tensor<10x10x10xf32>
    }
    
    // -----
    
    // CHECK: tac.cost = 0x4B673001
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 4.1K bytes
  3. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/graph-undefined-output.pbtxt

        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "shape"
        value {
          shape {
            dim {
              size: 1
            }
          }
        }
      }
    }
    node {
      name: "Relu"
      op: "Relu"
      input: "input"
      device: "/device:CPU:0"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
    }
    versions {
      producer: 27
    - Last Modified: Tue Aug 10 23:27:16 UTC 2021
    - 713 bytes
  4. tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir

      %add = tfr.call @tf__add(%dot, %bias) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
    
      %relu = tfr.constant "relu" -> !tfr.attr
      %relu6 = tfr.constant "relu6" -> !tfr.attr
    
      %is_relu = tfr.equal %act, %relu -> i1
      %res = scf.if %is_relu -> !tfr.tensor {
        %applied_relu = tfr.call @tf__relu(%add) : (!tfr.tensor) -> !tfr.tensor
        scf.yield %applied_relu : !tfr.tensor
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 4.2K bytes
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/basic_lstm.mlir

    // CHECK-LABEL: @main
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.1K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/function-func-attr.pbtxt

      op: "custom_embedding_matmul"
    }
    library {
      function {
        signature {
          name: "custom_relu"
        }
        attr {
          key: "_implements"
          value {
            func {
              name: "tensorflow.relu"
            }
          }
        }
      }
      function {
        signature {
          name: "custom_embedding_matmul"
        }
        attr {
          key: "_implements"
          value {
            func {
    - Last Modified: Tue Aug 01 20:09:54 UTC 2023
    - 1.3K bytes
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32>
      // CHECK: tac.cost = 1.000000e+03
      %1 = "tfl.mul"(%0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32>
      func.return %1 : tensor<10x10x10xf32>
    }
    
    // -----
    
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
  8. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      %14 = "tf.AddV2"(%10, %12) : (tensor<?x256x56x56xf32>, tensor<?x256x56x56xf32>) -> tensor<?x256x56x56xf32>
      %15 = "tf.Relu"(%14) : (tensor<?x256x56x56xf32>) -> tensor<?x256x56x56xf32>
    
      // CHECK: %[[ADD:[0-9]*]] = "tf.AddV2"(%[[BATCH_NORM1]], %[[BATCH_NORM2]])
      // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[ADD]])
    
      // Reduce spatial dimensions
      %16 = "tf.Mean"(%15, %1) : (tensor<?x256x56x56xf32>, tensor<2xi32>) -> tensor<?x256xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
  9. tensorflow/compiler/mlir/tensorflow/tests/device_assignment.mlir

      // CHECK: device = "cpu"
      %2 = "tf.Relu"(%1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "cpu"} : (tensor<3x3xf32>) -> tensor<3x3xf32>
      // CHECK: device = "gpu"
      %3 = "tf.Relu"(%2) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"]} : (tensor<3x3xf32>) -> tensor<3x3xf32>
      func.return %3 : tensor<3x3xf32>
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 924 bytes
  10. tensorflow/compiler/mlir/lite/tests/end2end/conv_2d_nchw.pbtxt

        value {
          s: "NHWC"
        }
      }
    }
    node {
      name: "conv_net_2d_1/Relu"
      op: "Relu"
      input: "conv_net_2d_1/conv_2d_0/BiasAdd"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
    }
    node {
      name: "output_0"
      op: "Identity"
      input: "conv_net_2d_1/Relu"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
    }
    - Last Modified: Fri Dec 03 03:26:13 UTC 2021
    - 3.7K bytes
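
Illustrative aside (not part of the search results above): the matches hit ReLU either as a standalone op ("tf.Relu" in the GraphDef and MLIR excerpts) or as a fused activation attribute (fused_activation_function = "RELU" on TFLite ops), and result 4 shows the TFR decomposition dispatching on that attribute string. A minimal NumPy sketch of the math these patterns compute, assuming the standard definitions relu(x) = max(0, x) and relu6(x) = min(max(0, x), 6); the function names here are purely illustrative, not TensorFlow APIs:

    import numpy as np

    def relu(x):
        # Standard ReLU: clamp negative values to zero.
        return np.maximum(x, 0.0)

    def relu6(x):
        # ReLU6: additionally clamp the upper bound at 6.
        return np.minimum(np.maximum(x, 0.0), 6.0)

    def apply_fused_activation(x, act):
        # Mirrors the branching seen in decomposition_lib.mlir (result 4):
        # pick the activation by comparing the attribute string.
        if act == "RELU":
            return relu(x)
        if act == "RELU6":
            return relu6(x)
        return x  # "NONE"

    x = np.array([-3.0, 0.5, 7.2])
    print(apply_fused_activation(x, "RELU"))   # [0.  0.5 7.2]
    print(apply_fused_activation(x, "RELU6"))  # [0.  0.5 6. ]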