Results 11-20 of 36 for "256x1xf32" (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<2x1xf32>
      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
      %2 = stablehlo.add %1, %0 : tensor<2x1xf32>
      return %2 : tensor<2x1xf32>
    }
    // CHECK: %[[TRANSPOSE_0:.+]] = stablehlo.transpose
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

      func.return %y : tensor<1x64x28x28xf32>
    }
    
    // CHECK-LABEL: bias_add_nchw
    func.func @bias_add_nchw(%arg0: tensor<1x256x150x150xf32>, %arg1: tensor<256xf32>) -> tensor<1x256x150x150xf32> {
      // CHECK: (%[[ARG0:.*]]: tensor<1x256x150x150xf32>, %[[ARG1:.*]]: tensor<256xf32>)
      // CHECK: %[[CST:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[R0:.*]] = "tf.Transpose"(%[[ARG0]], %[[CST]])
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
  3. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

    // CHECK-NOT: "tfl.batch_matmul"
    func.func @Batchmatmul2FullyconnectedQDQ(%arg0: tensor<4x128x2xf32>, %arg1: tensor<2x1xf32>) -> (tensor<4x128x1xf32>) {
      %0 = arith.constant dense<[[1.0], [2.0]]> : tensor<2x1xf32>
      %1 = "tfl.quantize"(%0) {qtype = tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>} : (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %0 : tensor<2x1xf32>
    }
    
    // CHECK:   func @pack(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    // CHECK-DAG:       %[[VAL_2:.*]] = "tfl.pseudo_const"{{.*}}dense<1> : tensor<4xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        %1:2 = "tf.IteratorGetNext"(%arg4) {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : (tensor<*x!tf_type.resource>) -> (tensor<2x224x224x3xf32>, tensor<2x1xf32>)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

        %2 = func.call @func_2_CPU_FLOAT(%0, %1) {tac.interface_name = "func_2"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %2 : tensor<2x1xf32>
      }
    
      func.func private @func_2_CPU_FLOAT(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
  7. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc

      %3 = "tfl.pack"(%1, %2) {axis = 0 : i32, per_device_costs = {CPU = 2.0 : f32, GPU = -1.0 : f32}, values_count = 2 : i32, tac.device = "CPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %3 : tensor<2x1xf32>
    })";
      const std::string kExpectedFB = CreateRuntimeMetadata();
      mlir::DialectRegistry registry;
      registry.insert<mlir::TFL::TensorFlowLiteDialect, mlir::arith::ArithDialect,
    - Last Modified: Tue Jun 11 06:11:34 UTC 2024
    - 6K bytes
  8. tensorflow/compiler/mlir/tensorflow/tests/fold-broadcast.mlir

    }
    
    // CHECK-LABEL: @broadcast_add_implicit_fold
    func.func @broadcast_add_implicit_fold(%arg0: tensor<5x1xf32>, %arg1: tensor<7xf32>) -> tensor<5x7xf32> {
      %cst = arith.constant dense<[5, 7]> : tensor<2xi32>
      %0 = "tf.BroadcastTo"(%arg1, %cst) : (tensor<7xf32>, tensor<2xi32>) -> tensor<5x7xf32>
      %1 = "tf.AddV2"(%arg0, %0) : (tensor<5x1xf32>, tensor<5x7xf32>) -> tensor<5x7xf32>
      func.return %1 : tensor<5x7xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %w = "tf.AddV2"(%cst, %cst_1) : (tensor<48x48x3x1xf32>, tensor<256xf32>) -> tensor<48x48x3x256xf32>
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
  10. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/CPU:0"} : (tensor<!tf_type.resource<tensor<2x1xf32>>>) -> tensor<2x1xf32>
      %2:2 = "tf.Split"(%cst, %arg0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
      %3 = "tf.TPUExecute"(%2#0, %1, %program#0) {_producer_name = "UNKNOWN", device = "/TPU:0"} : (tensor<1x2xf32>, tensor<2x1xf32>, tensor<3x!tf_type.string>) -> tensor<1x1xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes