Results 21 - 29 of 29 for 1x1x1x96xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    func.func @depth_to_space(%arg0: tensor<1x1x1x4xf32>) -> tensor<1x2x2x1xf32> {
      %0 = "tf.DepthToSpace"(%arg0) {block_size = 2: i64,  data_format = "NHWC"}: (tensor<1x1x1x4xf32>) -> tensor<1x2x2x1xf32>
      func.return %0 : tensor<1x2x2x1xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0) <{custom_code = "FlexDepthToSpace", custom_option = #tfl<const_bytes : "{{.*}}">}> : (tensor<1x1x1x4xf32>) -> tensor<1x2x2x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
  2. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // CustomOpNotWeightOnly-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> tensor<*xf32> attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w = arith.constant dense<127.0> : tensor<1024x1x1x1xf32>
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  3. tensorflow/compiler/mlir/lite/tests/ops.mlir

    func.func @testSpaceToDepthF32(%arg0: tensor<1x2x2x1xf32>) -> tensor<1x1x1x4xf32> {
      // CHECK: %[[ARG:.*]]: tensor<1x2x2x1xf32>
      // CHECK: "tfl.space_to_depth"(%[[ARG]]) <{block_size = 2 : i32}> : (tensor<1x2x2x1xf32>) -> tensor<1x1x1x4xf32>
      %0 = "tfl.space_to_depth"(%arg0) {block_size = 2: i32} : (tensor<1x2x2x1xf32>) -> tensor<1x1x1x4xf32>
      func.return %0 : tensor<1x1x1x4xf32>
    }
    
    // -----
    
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  4. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    // CHECK-LABEL: @NotReorderReshapeAddIfNotTailingDimAfter
    func.func @NotReorderReshapeAddIfNotTailingDimAfter(%arg0: tensor<1x30x1x96xf32>) -> tensor<1x30x96xf32> {
      %cst = arith.constant dense<2.0> : tensor<1x30x96xf32>
      %shape = arith.constant dense<[1, 30, 96]> : tensor<3xi32>
      %1 = "tfl.reshape"(%arg0, %shape) : (tensor<1x30x1x96xf32>, tensor<3xi32>) -> tensor<1x30x96xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

        } : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %1 : tensor<1x1x1x16xf32>
    
    // CHECK: %0 = "tfl.dequantize"(%arg0)
    // CHECK: %1 = "tfl.average_pool_2d"(%0)
    // CHECK: %2 = "tfl.quantize"(%1)
    // CHECK: %3 = "tfl.dequantize"(%2)
    // CHECK: return %3 : tensor<1x1x1x16xf32>
    }
    
    // CHECK-LABEL: QuantizeMaximum
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      func.func @simple_folding(%arg0: tensor<1x1x1x1xi32>, %arg1: tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32> {
        // CHECK: %[[SHAPE:.*]] = "tf.Shape"
        // CHECK: %[[CONV:.*]] = "tf.Conv2DBackpropInput"(%[[SHAPE]]
        // CHECK-SAME: (tensor<4xi32>, tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
        // CHECK: return %[[CONV]] : tensor<1x1x1x1xf32>
        %0 = "tf.Shape"(%arg0) : (tensor<1x1x1x1xi32>) -> tensor<4xi32>
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
  7. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

      // Move a binary op's batched RHS before the reshape:
      // binary(reshape(lhs), rhs) => reshape(binary(lhs, flatten(rhs)))
      // The pattern targeted here is as follows:
      // [input, lhs, rhs] == [<1x1024x128>, <1x1024x8x16>, <1x1x8x16xf32>]
      // This is valid only when:
      // 1. the last dimension of lhs is equal to the number of elements in the constant rhs.
      // 2. the reduced shape of rhs, here <8x16>, is equal to the last dimensions of lhs.
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
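    The comment in this result describes the "before" form that the rewrite targets. As a rough sketch only (the shapes come from the comment, tfl.add stands in for a generic binary op, and the function name is made up; this snippet is not part of any of the indexed files), the IR prior to the rewrite could look like:

    func.func @reorder_reshape_add_example(%arg0: tensor<1x1024x128xf32>) -> tensor<1x1024x8x16xf32> {
      // Reshape the 3-D input to 4-D, then add a broadcast constant whose reduced
      // shape <8x16> matches the trailing dimensions of the reshape (8 * 16 == 128).
      %cst = arith.constant dense<2.0> : tensor<1x1x8x16xf32>
      %shape = arith.constant dense<[1, 1024, 8, 16]> : tensor<4xi32>
      %0 = "tfl.reshape"(%arg0, %shape) : (tensor<1x1024x128xf32>, tensor<4xi32>) -> tensor<1x1024x8x16xf32>
      %1 = "tfl.add"(%0, %cst) {fused_activation_function = "NONE"} : (tensor<1x1024x8x16xf32>, tensor<1x1x8x16xf32>) -> tensor<1x1024x8x16xf32>
      func.return %1 : tensor<1x1024x8x16xf32>
    }

    Under the rewrite, the add would instead be applied to %arg0 with the constant flattened to tensor<128xf32>, and the reshape would follow the add.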
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_2:.*]] = "tf.Reshape"(%[[VAL_0]], %[[VAL_1]]) : (tensor<3x1x16xf32>, tensor<4xi64>) -> tensor<3x1x1x16xf32>
    // CHECK:           %[[VAL_3:.*]] = arith.constant dense<[3, 8, 8, 16]> : tensor<4xi64>
    // CHECK:           %[[VAL_4:.*]] = "tf.BroadcastTo"(%[[VAL_2]], %[[VAL_3]]) : (tensor<3x1x1x16xf32>, tensor<4xi64>) -> tensor<3x8x8x16xf32>
    // CHECK:           return %[[VAL_4]] : tensor<3x8x8x16xf32>
    // CHECK:         }
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    }
    
    // -----
    
    // CHECK-LABEL: @conv2d_backprop_filter_grouped
    func.func @conv2d_backprop_filter_grouped(
        %input: tensor<1x2x2x2xf32>,
        %out_backprop: tensor<1x1x1x2xf32>
      ) -> tensor<2x2x1x2xf32> {
    
      // CHECK: mhlo.convolution(%arg0, %arg1)
      // CHECK-SAME:  batch_group_count = 2 : i64
      // CHECK-SAME:  feature_group_count = 1 : i64
    
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes