Results 21 - 25 of 25 for 1x80xf32 (0.19 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

      %9 = "mhlo.compare"(%0, %8) {comparison_direction = #mhlo<comparison_direction NE>} : (tensor<10x8xf32>, tensor<10x8xf32>) -> tensor<10x8xi1>
      %10 = mhlo.and %7, %9 : tensor<10x8xi1>
      %11 = mhlo.subtract %arg0, %6 : tensor<10x8xf32>
      %12 = mhlo.divide %11, %5 : tensor<10x8xf32>
      %13 = mhlo.add %12, %3 : tensor<10x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

        // CHECK-NEXT: return %[[DIV]] : tensor<4x10xf32>
        %0 = "tf.Softsign"(%arg0) : (tensor<4x10xf32>) -> tensor<4x10xf32>
        func.return %0 : tensor<4x10xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @softsign_grad
    func.func @softsign_grad(%arg0: tensor<4x10xf32>, %arg1: tensor<4x10xf32>) -> tensor<4x10xf32> {
    
        // CHECK-NEXT: %[[ONE:.*]] = mhlo.constant dense<1.000000e+00> : tensor<f32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
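The legalize-tf.mlir snippet above comes from the tests that lower `tf.Softsign` and `tf.SoftsignGrad` to MHLO. As a reference point only, here is a small NumPy sketch (my own, not taken from the repository) of the math those lowerings are checked against, assuming the standard definitions softsign(x) = x / (1 + |x|) and a gradient of grad / (1 + |x|)^2; the shapes mirror the tensor<4x10xf32> operands in the test.

```python
import numpy as np

def softsign(x: np.ndarray) -> np.ndarray:
    # softsign(x) = x / (1 + |x|)
    return x / (1.0 + np.abs(x))

def softsign_grad(grad: np.ndarray, x: np.ndarray) -> np.ndarray:
    # d/dx softsign(x) = 1 / (1 + |x|)^2, scaled by the incoming gradient
    return grad / np.square(1.0 + np.abs(x))

x = np.random.randn(4, 10).astype(np.float32)
g = np.ones_like(x)
print(softsign(x).shape, softsign_grad(g, x).shape)  # (4, 10) (4, 10)
```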
  3. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

        } else {
          // Recurse on the subtypes in the variant/resource. Basically if the input
          // were:
          //   tensor<!tf_type.variant<tensor<?x8xf32>>>
          // and:
          //   tensor<!tf_type.variant<tensor<10x8xf32>>>
          // we'll try here to refine tensor<?x8xf32> with tensor<10x8xf32>.
          auto refined_subtype = mlir::cast<TensorType>(
              TypeMeet(lhs_element_type_with_subtype.GetSubtypes().front(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
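The comment in shape_inference.cc above describes recursing into variant/resource subtypes and "meeting" a partially known tensor type with a more refined one, e.g. refining tensor<?x8xf32> with tensor<10x8xf32>. Below is a minimal standalone sketch of that refinement idea on plain shape lists; the name `meet_shapes` and the use of `None` for a dynamic (?) dimension are my own conventions, and this is not the `TypeMeet` implementation, which operates on MLIR types.

```python
from typing import List, Optional, Sequence

def meet_shapes(lhs: Sequence[Optional[int]],
                rhs: Sequence[Optional[int]]) -> Optional[List[Optional[int]]]:
    """Refine two shapes dimension-wise; None marks a dynamic (?) dimension."""
    if len(lhs) != len(rhs):
        return None  # Rank mismatch: nothing to refine.
    refined = []
    for l, r in zip(lhs, rhs):
        if l is None:               # ? on the left: take the right-hand dim.
            refined.append(r)
        elif r is None or l == r:   # ? on the right, or the dims agree.
            refined.append(l)
        else:
            return None             # Concrete but conflicting dims.
    return refined

# tensor<?x8xf32> refined with tensor<10x8xf32> -> [10, 8]
print(meet_shapes([None, 8], [10, 8]))
```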
  4. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

        ```mlir
          %0 = "tf.Const"() {value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %1 = "tf.Const"() {device = "", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
        ```
    
        then running this pass with 'default-device=foobar', we get:
    
        ```mlir
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
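The tf_passes.td description above shows the input IR, and the output block after "we get:" is cut off by the snippet. A hedged Python sketch of the behaviour that description implies, using my own toy dict representation of ops rather than the actual pass: ops whose device attribute is missing or empty receive the 'default-device' option, while an op that already names a device (such as "baz") is left unchanged.

```python
def assign_default_device(ops, default_device):
    """Give every op without a non-empty device attribute the default device."""
    for op in ops:
        if not op.get("device"):          # Attribute missing or empty string.
            op["device"] = default_device

ops = [
    {"op": "tf.Const"},                   # No device attribute.
    {"op": "tf.Const", "device": ""},     # Empty device attribute.
    {"op": "tf.Const", "device": "baz"},  # Explicit device: left unchanged.
]
assign_default_device(ops, "foobar")
print([op.get("device") for op in ops])   # ['foobar', 'foobar', 'baz']
```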
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    mlir_module = '''
    func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
       %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
       return %add : tensor<10x10xf32>
    }
    '''
    
    @tf.function
    def foo(x, y):
      return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)