Results 1 - 10 of 13 for 8x16xf16 (0.54 sec)

  1. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      %0 = "tf.Softplus"(%arg0) : (tensor<8x16xf16>) -> tensor<8x16xf16>
    
      // CHECK:     return [[ENTRY_SELECT]] : tensor<8x16xf16>
      func.return %0 : tensor<8x16xf16>
    }
    
    // -----
    
    // CHECK-LABEL: func @softplus_bf16
    // CHECK-SAME: ([[FEATURES:%.*]]: tensor<8x16xbf16>)
    func.func @softplus_bf16(%arg0: tensor<8x16xbf16>) -> tensor<8x16xbf16> {
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
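
    For reference, a self-contained version of the f16 case excerpted above (the function name is illustrative). The tf2xla bridge legalizes this single tf.Softplus op into an HLO expansion whose final select is what the ENTRY_SELECT capture in the CHECK line refers to:

      func.func @softplus_f16(%arg0: tensor<8x16xf16>) -> tensor<8x16xf16> {
        // Presumably lowered by legalize-tf into an exp/log1p/select expansion;
        // only the return of the final select is visible in this excerpt.
        %0 = "tf.Softplus"(%arg0) : (tensor<8x16xf16>) -> tensor<8x16xf16>
        func.return %0 : tensor<8x16xf16>
      }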
  2. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      func.return %0 : tensor<8x16xf32>
    
    // CHECK-LABEL: floor_div
    // CHECK:  tfl.floor_div %arg0, %arg1 : tensor<8x16xf32>
    // CHECK:  return
    }
    
    func.func @floor_div_i16(tensor<8x16xi16>, tensor<8x16xi16>) -> tensor<8x16xi16> {
    ^bb0(%arg0: tensor<8x16xi16>, %arg1: tensor<8x16xi16>):
      %0 = "tf.FloorDiv"(%arg0, %arg1) : (tensor<8x16xi16>, tensor<8x16xi16>) -> tensor<8x16xi16>
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
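
    For context, a minimal self-contained form of the f32 pattern the CHECK lines above describe (the function name is illustrative): a tf.FloorDiv on tensor<8x16xf32> legalizes to a single tfl.floor_div. Whether the i16 variant shown above legalizes the same way depends on CHECK lines that fall outside this excerpt.

      func.func @floor_div_f32(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
        // After legalize-tf this op is rewritten to: tfl.floor_div %arg0, %arg1 : tensor<8x16xf32>
        %0 = "tf.FloorDiv"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
        func.return %0 : tensor<8x16xf32>
      }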
  3. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: return %0
    }
    
    // CHECK-LABEL: testAddOfNegRight
    func.func @testAddOfNegRight(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Neg"(%arg1) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      %1 = "tf.Add"(%arg0, %0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
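
    A hedged sketch of what this canonicalization presumably produces (the CHECK lines are outside the excerpt, and the function name here is illustrative): adding the negation of the right operand folds into a single tf.Sub, so %arg0 + (-%arg1) becomes %arg0 - %arg1.

      func.func @add_of_neg_right_expected(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
        // Single subtraction replacing the tf.Neg + tf.Add pair above.
        %0 = "tf.Sub"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
        func.return %0 : tensor<8x16xf32>
      }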
  4. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: softmax
    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[SOFTMAX_0:.*]] = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    // CHECK: return %[[SOFTMAX_0]] : tensor<8x16xf32>
    }
    
    // CHECK-LABEL: conv2d_backprop_input_with_add
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
  5. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

      // CHECK: %[[DQ_2:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_4:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_5:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
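
    A minimal sketch of the pattern those CHECK lines match (shapes taken from the excerpt, function name illustrative): float16 dynamic-range quantization keeps weights as f16 and inserts a tfl.dequantize back to f32 at each use site.

      func.func @dequantize_f16(%arg0: tensor<1x1xf16>) -> tensor<1x1xf32> {
        // f16 value dequantized to f32 before being consumed by float kernels.
        %0 = "tfl.dequantize"(%arg0) : (tensor<1x1xf16>) -> tensor<1x1xf32>
        func.return %0 : tensor<1x1xf32>
      }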
  6. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0) <{custom_code = "FlexSoftmax", custom_option = #tfl<const_bytes : "0x07536F66746D617800161207536F66746D61781A002A070A0154120230013200000221191414042801">}> : (tensor<8x16xf32>) -> tensor<8x16xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
  7. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

      %0 = "tfl.conv_2d"(%arg0, %arg1, %cst_0) {dilation_h_factor = 2 : i32, dilation_w_factor = 3 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<256x32x32x3xf16>, tensor<16x3x3x3xf16>, tensor<16xf16>) -> tensor<256x8x7x16xf16>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
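
    The same op wrapped in a function for context (operand names are illustrative). This test file is exercised without the verifier, which is presumably why the tfl.conv_2d here carries f16 operands that a verified build might reject:

      func.func @conv2d_f16(%input: tensor<256x32x32x3xf16>, %filter: tensor<16x3x3x3xf16>, %bias: tensor<16xf16>) -> tensor<256x8x7x16xf16> {
        %0 = "tfl.conv_2d"(%input, %filter, %bias) {dilation_h_factor = 2 : i32, dilation_w_factor = 3 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<256x32x32x3xf16>, tensor<16x3x3x3xf16>, tensor<16xf16>) -> tensor<256x8x7x16xf16>
        func.return %0 : tensor<256x8x7x16xf16>
      }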
  8. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

      func.return %0 : tensor<8x16xf32>
    }
    
    // -----
    
    func.func @testCumprod(%arg: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %axis = arith.constant dense<-3> : tensor<i32>
      // expected-error @+1 {{axis operand should be within range [-2, 2)}}
      %0 = "tf.Cumprod"(%arg, %axis) : (tensor<8x16xf32>, tensor<i32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    }
    
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
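
    For contrast, a sketch of the in-range case (the function name is illustrative): with a rank-2 operand the axis must lie in [-2, 2), so dense<0> verifies where the dense<-3> above triggers the expected error.

      func.func @testCumprodValidAxis(%arg: tensor<8x16xf32>) -> tensor<8x16xf32> {
        // Axis 0 is within [-2, 2) for a tensor<8x16xf32>, so this verifies.
        %axis = arith.constant dense<0> : tensor<i32>
        %0 = "tf.Cumprod"(%arg, %axis) : (tensor<8x16xf32>, tensor<i32>) -> tensor<8x16xf32>
        func.return %0 : tensor<8x16xf32>
      }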
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<16x8xf32>) -> tensor<8x16xf32>
      %2 = "stablehlo.reduce_window"(%1, %0) ({
      ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
          %3 = stablehlo.maximum %arg1, %arg2 : tensor<f32>
          stablehlo.return %3 : tensor<f32>
      }) {window_dimensions = array<i64: 2, 2>, window_strides = array<i64: 2, 2>} : (tensor<8x16xf32>, tensor<f32>) -> tensor<4x8xf32>
      return %2 : tensor<4x8xf32>
    }
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
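
    A hedged sketch of what deferring the activation transpose would look like for this excerpt (the init constant, value names, and function name are illustrative, not taken from the test): the max reduce_window runs on the original 16x8 layout and the transpose is moved after it, producing the same 4x8 result.

      func.func @deferred_transpose(%arg0: tensor<16x8xf32>) -> tensor<4x8xf32> {
        %init = stablehlo.constant dense<0.000000e+00> : tensor<f32>
        %0 = "stablehlo.reduce_window"(%arg0, %init) ({
        ^bb0(%lhs: tensor<f32>, %rhs: tensor<f32>):
          %max = stablehlo.maximum %lhs, %rhs : tensor<f32>
          stablehlo.return %max : tensor<f32>
        }) {window_dimensions = array<i64: 2, 2>, window_strides = array<i64: 2, 2>} : (tensor<16x8xf32>, tensor<f32>) -> tensor<8x4xf32>
        // Transpose applied after the reduction instead of before it.
        %1 = stablehlo.transpose %0, dims = [1, 0] : (tensor<8x4xf32>) -> tensor<4x8xf32>
        return %1 : tensor<4x8xf32>
      }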
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

    // CHECK: %[[UQ_1:.*]] = stablehlo.uniform_quantize %[[ARG_1]] : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform<i8:f32, 2.500000e+00>>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
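
    A small self-contained sketch built from the CHECK line above (the function name and the dequantize round-trip are illustrative): once the pass composes the uniform quantized type, the value is carried as tensor<8x16x4x!quant.uniform<i8:f32, 2.5>> between the quantize and dequantize ops.

      func.func @uniform_quantize_roundtrip(%arg0: tensor<8x16x4xf32>) -> tensor<8x16x4xf32> {
        // Quantize to the composed i8 uniform quantized type (scale 2.5), then back to f32.
        %0 = stablehlo.uniform_quantize %arg0 : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform<i8:f32, 2.500000e+00>>
        %1 = stablehlo.uniform_dequantize %0 : (tensor<8x16x4x!quant.uniform<i8:f32, 2.500000e+00>>) -> tensor<8x16x4xf32>
        return %1 : tensor<8x16x4xf32>
      }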