Results 61 - 70 of 86 for 1x2xi32 (0.91 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // Padding in spatial dimension (NCHW)
      %0 = "tf.Const"() {value = dense<[[0, 0], [0, 0], [3, 3], [3, 3]]> : tensor<4x2xi32>} : () -> tensor<4x2xi32>
    
      // Reduce over spatial dimensions (NCHW)
      %1 = "tf.Const"() {value = dense<[2, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
    
      // Transpose input: NHWC -> NCHW
      %2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
    - Viewed (0)
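    A minimal self-contained sketch (input shape and function name are assumed, not from the test) of how the NCHW padding constant above would feed a tf.Pad op:
      // Hypothetical example: pad the two spatial dimensions of an NCHW tensor by 3 on each side.
      func.func @pad_spatial_nchw(%arg0: tensor<1x3x4x4xf32>) -> tensor<1x3x10x10xf32> {
        %paddings = "tf.Const"() {value = dense<[[0, 0], [0, 0], [3, 3], [3, 3]]> : tensor<4x2xi32>} : () -> tensor<4x2xi32>
        %0 = "tf.Pad"(%arg0, %paddings) : (tensor<1x3x4x4xf32>, tensor<4x2xi32>) -> tensor<1x3x10x10xf32>
        func.return %0 : tensor<1x3x10x10xf32>
      }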
  2. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/CPU:0"} : (tensor<!tf_type.resource<tensor<1x1xf32>>>) -> tensor<1x1xf32>
      %2:2 = "tf.Split"(%cst, %arg0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
      %3:2 = "tf.Split"(%cst, %2#0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x2xf32>) -> (tensor<1x1xf32>, tensor<1x1xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes
    - Viewed (0)
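    A minimal sketch of the tf.Split pattern shown above, isolated from the TPU-specific ops (the standalone function and its name are assumptions):
      // Hypothetical example: split a 1x4 tensor into two 1x2 tensors along dimension 1.
      func.func @split_example(%arg0: tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>) {
        %split_dim = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
        %0:2 = "tf.Split"(%split_dim, %arg0) : (tensor<i32>, tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
        func.return %0#0, %0#1 : tensor<1x2xf32>, tensor<1x2xf32>
      }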
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

    }
    
    // CHECK-LABEL: quantized_constant
    func.func @quantized_constant(%arg0: tensor<1x2xf32>) -> tensor<2x2xf32> {
      %1 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>, volatile} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>
      %cst = "tfl.pseudo_qconst"() {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>, value = dense<-76> : tensor<1x2xi8>} : () -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)
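    A minimal sketch of a tfl.quantize/tfl.dequantize round trip using the same quantized type as the snippet (the dequantize step and function name are assumptions, not part of the test shown):
      // Hypothetical example: quantize a 1x2 float tensor to uniform u8 and back.
      func.func @quantize_roundtrip(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
        %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>
        %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> tensor<1x2xf32>
        func.return %1 : tensor<1x2xf32>
      }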
  4. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

      %1 = "tf.Const"() {value = dense<[[0, 0], [0, 0], [0, 0], [1, 1], [1, 1]]> : tensor<5x2xi32>} : () -> tensor<5x2xi32>
      %2 = "tf.Transpose"(%arg0, %0) : (tensor<1x2x4x4x3xf32>, tensor<5xi32>) -> tensor<1x2x3x4x4xf32>
      %3 = "tf.Pad"(%2, %1) : (tensor<1x2x3x4x4xf32>, tensor<5x2xi32>) -> tensor<1x2x3x6x6xf32>
      func.return %2, %3 : tensor<1x2x3x4x4xf32>, tensor<1x2x3x6x6xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
    - Viewed (0)
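    The defining op for %0 (the transpose permutation) falls outside the excerpt; from the shapes shown (1x2x4x4x3 -> 1x2x3x4x4) it would be the permutation [0, 1, 4, 2, 3]. A self-contained sketch under that assumption (function name hypothetical):
      // Hypothetical example: move the trailing channel-like dimension ahead of the two spatial dims, then pad them.
      func.func @transpose_then_pad(%arg0: tensor<1x2x4x4x3xf32>) -> tensor<1x2x3x6x6xf32> {
        %perm = "tf.Const"() {value = dense<[0, 1, 4, 2, 3]> : tensor<5xi32>} : () -> tensor<5xi32>
        %paddings = "tf.Const"() {value = dense<[[0, 0], [0, 0], [0, 0], [1, 1], [1, 1]]> : tensor<5x2xi32>} : () -> tensor<5x2xi32>
        %0 = "tf.Transpose"(%arg0, %perm) : (tensor<1x2x4x4x3xf32>, tensor<5xi32>) -> tensor<1x2x3x4x4xf32>
        %1 = "tf.Pad"(%0, %paddings) : (tensor<1x2x3x4x4xf32>, tensor<5x2xi32>) -> tensor<1x2x3x6x6xf32>
        func.return %1 : tensor<1x2x3x6x6xf32>
      }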
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.json

    // CHECK-DAG: %[[input_18:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-8.000000e-01, 1.600000e+00]> : tensor<2xf32>}> : (tensor<1x4xf32>) -> tensor<1x4xf32>
    // CHECK-DAG: %[[input_19:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-2.000000e+00, 4.000000e+00]> : tensor<2xf32>}> : (tensor<1x2xf32>) -> tensor<1x2xf32>
    
    // CHECK: "tfl.unidirectional_sequence_lstm"({{.*}}, %[[input_18]], %[[input_19]], %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}})
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 06:25:50 UTC 2024
    - 9.1K bytes
    - Viewed (0)
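    The full tfl.unidirectional_sequence_lstm op takes many operands and is not reproduced here; a minimal sketch of just the quantfork.stats calibration annotation the CHECK lines match (function name assumed):
      // Hypothetical example: attach recorded min/max statistics to an activation for later quantization.
      func.func @stats_example(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
        %0 = "quantfork.stats"(%arg0) <{layerStats = dense<[-2.000000e+00, 4.000000e+00]> : tensor<2xf32>}> : (tensor<1x2xf32>) -> tensor<1x2xf32>
        func.return %0 : tensor<1x2xf32>
      }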
  6. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir

      func.return %0#0 : tensor<2xi32>
    }
    
    // CHECK-LABEL: mirror_pad
    func.func @mirror_pad(%arg0: tensor<2x3xcomplex<f64>>) -> tensor<4x7xcomplex<f64>> {
      %0 = mhlo.constant dense<[[1, 1], [2, 2]]> : tensor<2x2xi32>
      // NO_FALLBACK: tf.MirrorPad
      // SUPPORTED_FALLBACK_DEVICE-NOT: tf.MirrorPad
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 3.2K bytes
    - Viewed (0)
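    A minimal sketch of the tf.MirrorPad op being legalized in this test, with a real-valued element type instead of complex<f64> to keep it short (function name and mode are assumptions):
      // Hypothetical example: reflect-pad a 2x3 tensor by [1, 1] and [2, 2], giving a 4x7 result.
      func.func @mirror_pad_example(%arg0: tensor<2x3xf32>) -> tensor<4x7xf32> {
        %paddings = "tf.Const"() {value = dense<[[1, 1], [2, 2]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
        %0 = "tf.MirrorPad"(%arg0, %paddings) {mode = "REFLECT"} : (tensor<2x3xf32>, tensor<2x2xi32>) -> tensor<4x7xf32>
        func.return %0 : tensor<4x7xf32>
      }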
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions_with_quantization_specs.mlir

    // RUN:   -split-input-file | FileCheck %s --check-prefix=STATIC-RANGE-PTQ-TO-COMPUTE-HEAVY
    
    // STATIC-RANGE-PTQ-TO-COMPUTE-HEAVY: @main
    func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
      %0 = stablehlo.add %arg0, %arg0 : tensor<1x2xf32>
      return %0 : tensor<1x2xf32>
    }
    // Tests that `composite_add_fn_1` does not quantize when quantizing
    // only compute-heavy ops.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 18:09:38 UTC 2024
    - 8.1K bytes
    - Viewed (0)
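    The comment above notes that a bare stablehlo.add stays unquantized under the compute-heavy preset; a hedged sketch of a compute-heavy op in the same dialect (a dot product, assumed here to be the kind of op that preset targets; function name hypothetical):
      // Hypothetical example: a matrix product, the kind of op a compute-heavy-only spec would quantize.
      func.func @dot_example(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
        %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
        return %0 : tensor<1x3xf32>
      }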
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    // deferred.
    
    // CHECK-LABEL: add_with_activation_transpose_rank_two
    func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<2x1xf32>
      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
      %2 = stablehlo.add %1, %0 : tensor<2x1xf32>
      return %2 : tensor<2x1xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
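    A sketch of the deferred form this pass aims to produce for the function above, with the add performed in the original layout and the transpose moved after it (inferred from the pass name, not the test's CHECK output):
      // Hypothetical result: constant in the pre-transpose layout, add first, transpose last.
      func.func @add_with_activation_transpose_rank_two_deferred(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
        %0 = stablehlo.constant dense<2.000000e+00> : tensor<1x2xf32>
        %1 = stablehlo.add %arg0, %0 : tensor<1x2xf32>
        %2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
        return %2 : tensor<2x1xf32>
      }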
  9. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    }
    
    // CHECK-LABEL: QuantizeConcat
    func.func @QuantizeConcat(tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>> {
    ^bb0(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>):
      %0 = "tfl.concatenation"(%arg0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
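    The excerpt cuts off before the function's quantized return value is produced; a hedged sketch of a complete version, assuming the concatenation result is simply quantized to the declared return type:
      // Hypothetical completion: concatenate two 1x2 inputs and quantize the 2x2 result.
      func.func @QuantizeConcat_sketch(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>> {
        %0 = "tfl.concatenation"(%arg0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
        %1 = "tfl.quantize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
        func.return %1 : tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
      }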
  10. tensorflow/compiler/mlir/tensorflow/tests/xla_broadcast.mlir

      // CHECK-NEXT:     %[[GROUP:.*]] = "tf.Const"()
      // CHECK-SAME:       [0, 1, 2, 3]
      // CHECK-NEXT:     %[[REDUCED:.*]] = "tf.XlaAllReduce"(%[[ID]], %[[GROUP]]) <{mode = "CrossReplica", reduce_op = "Add"}> : (tensor<f32>, tensor<1x4xi32>) -> tensor<f32>
      // CHECK-NEXT:     "tf.OpA"(%[[REDUCED]]) : (tensor<f32>) -> ()
      tf_device.replicate {n = 4 : i32} {
        "tf_device.cluster"() ({
          "tf.OpA"(%arg0) : (tensor<f32>) -> ()
          tf_device.return
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 2.9K bytes
    - Viewed (0)
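    A minimal sketch of the tf.XlaAllReduce op the CHECK lines match, lifted out of the tf_device.replicate region (the standalone function and its name are assumptions):
      // Hypothetical example: cross-replica sum over a group of four replicas.
      func.func @all_reduce_example(%arg0: tensor<f32>) -> tensor<f32> {
        %group = "tf.Const"() {value = dense<[[0, 1, 2, 3]]> : tensor<1x4xi32>} : () -> tensor<1x4xi32>
        %0 = "tf.XlaAllReduce"(%arg0, %group) {mode = "CrossReplica", reduce_op = "Add"} : (tensor<f32>, tensor<1x4xi32>) -> tensor<f32>
        func.return %0 : tensor<f32>
      }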