Results 1 - 10 of 20 for 8x16xi32 (0.17 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    }
    
    // -----
    
    // CHECK-LABEL: xla_sharding
    func.func @xla_sharding(%arg0: tensor<4x16xf32>) -> tensor<4x16xf32> {
      // CHECK-NEXT: mhlo.custom_call @Sharding(%arg0) {mhlo.sharding = ""}
      %0 = "tf.XlaSharding"(%arg0) {_XlaSharding = "", sharding = ""} : (tensor<4x16xf32>) -> tensor<4x16xf32>
      func.return %0 : tensor<4x16xf32>
    }
    
    // -----
    
    // CHECK-LABEL: inplace_update_one

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      func.return %0 : tensor<8x16xf32>
    
    // CHECK-LABEL:minimum
    // CHECK:  "tfl.minimum"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
    }
    
    func.func @realDiv(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.RealDiv"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: return %0
    }
    
    // CHECK-LABEL: testAddOfNegRight
    func.func @testAddOfNegRight(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Neg"(%arg1) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      %1 = "tf.Add"(%arg0, %0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: softmax
    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[SOFTMAX_0:.*]] = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    // CHECK: return %[[SOFTMAX_0]] : tensor<8x16xf32>
    }
    
    // CHECK-LABEL: conv2d_backprop_input_with_add

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0) <{custom_code = "FlexSoftmax", custom_option = #tfl<const_bytes : "0x07536F66746D617800161207536F66746D61781A002A070A0154120230013200000221191414042801">}> : (tensor<8x16xf32>) -> tensor<8x16xf32>

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // CHECK: "tf.GatherV2"({{.+}}, {{.+}}, {{.+}}) <{batch_dims = 1 : i64}> : (tensor<2x8x16xi32>, tensor<5xi32>, tensor<i64>) -> tensor<2x5x16xi32>
        %1 = "tf.ResourceGather"(%resource, %indices) {batch_dims = 1} : (tensor<*x!tf_type.resource<tensor<2x8x16xi32>>>, tensor<5xi32>) -> (tensor<2x5x16xi32>)
    
        tf_device.return %1 : tensor<2x5x16xi32>

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 51.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL0:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<16x{{.+}}>, value = dense<1> : tensor<16xi32>}>
      // CHECK-DAG: %[[VAL1:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<16x{{.+}}>, value = dense<2> : tensor<16xi32>}>
      // CHECK-DAG: %[[VAL2:.+]] = "tfl.dequantize"(%[[VAL0]])
      // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]])

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %14 = stablehlo.subtract %12, %13 : tensor<8x16x16xi32>  // q1 - z1
        %15 = stablehlo.convert %11 : (tensor<8x16x4xi8>) -> tensor<8x16x4xi32>
        %16 = stablehlo.broadcast_in_dim %6, dims = [0, 1, 2] : (tensor<1x1x1xi32>) -> tensor<8x16x4xi32>
        %17 = stablehlo.subtract %15, %16 : tensor<8x16x4xi32>  // q2 - z2
        // Corresponds to einsum expression: b i j, b j d -> b i d

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<16x8xf32>) -> tensor<8x16xf32>
      %2 = "stablehlo.reduce_window"(%1, %0) ({
      ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
          %3 = stablehlo.maximum %arg1, %arg2 : tensor<f32>
          stablehlo.return %3 : tensor<f32>
      }) {window_dimensions = array<i64: 2, 2>, window_strides = array<i64: 2, 2>} : (tensor<8x16xf32>, tensor<f32>) -> tensor<4x8xf32>
      return %2 : tensor<4x8xf32>
    }

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)