Results 11 - 20 of 86 for 96xf32 (0.43 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir

      // CHECK-NEXT: %[[ADDN:.*]] = "tf.AddN"(%[[UPDATE]], %[[BROADCAST]]) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
      %addn = "tf.AddN"(%set, %tl) : (tensor<!tf_type.variant<tensor<f32>>>, tensor<!tf_type.variant<tensor<f32>>>) -> tensor<!tf_type.variant<tensor<f32>>>
      // CHECK-NEXT: %[[ZEROS_LIKE:.*]] = "tf.ZerosLike"(%[[ADDN]]) : (tensor<10xf32>) -> tensor<10xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 38.6K bytes
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      func.return %0#0 : tensor<?x?x?x?xf32>
    }
    
    // -----
    
    // CHECK-LABEL: fusedBatchNormV3_training_dynamic_unsupported1
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
  3. tensorflow/compiler/mlir/tfrt/tests/mlrt/while_to_map_fn.mlir

      %outputs_50 =  "tf.Mul"(%outputs_30, %outputs_48) {device = ""} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
      %outputs_52 =  "tf.Reshape"(%outputs_50, %outputs_0) {device = ""} : (tensor<*xf32>, tensor<2xi32>) -> tensor<*xf32>
      %outputs_54 =  "tf.MatMul"(%outputs_40, %outputs_52) {device = "", transpose_a = false, transpose_b = false} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    - Last Modified: Tue Apr 23 06:40:22 UTC 2024
    - 68.6K bytes
  4. tensorflow/compiler/mlir/lite/tests/ops.mlir

      %0 = "tfl.unidirectional_sequence_lstm"(%arg0,...
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

      %2 = "tf.Mul"(%1, %value) {T = "tfdtype$DT_FLOAT"} : (tensor<256x8x7x16xf32>, tensor<16xf32>) -> tensor<256x8x7x16xf32>
      func.return %2 : tensor<256x8x7x16xf32>
    
    // CHECK-DAG: %[[cst:.*]] = "tf.Const{{.*}} dense<8.000000e+00> : tensor<3x3x3x16xf32>
    // CHECK-DAG: %[[cst_0:.*]] = "tf.Const{{.*}} dense<1.200000e+01> : tensor<16xf32>
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 3.3K bytes
  6. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant.mlir

      %min = arith.constant dense<0.0> : tensor<16xf32>
      %max = arith.constant dense<255.0> : tensor<16xf32>
      %mini = "tf.Identity"(%min) : (tensor<16xf32>) -> tensor<16xf32>
      %maxi = "tf.Identity"(%max) : (tensor<16xf32>) -> tensor<16xf32>
      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 5, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
  7. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant_4bit.mlir

      %min = arith.constant dense<0.0> : tensor<16xf32>
      %max = arith.constant dense<15.0> : tensor<16xf32>
      %mini = "tf.Identity"(%min) : (tensor<16xf32>) -> tensor<16xf32>
      %maxi = "tf.Identity"(%max) : (tensor<16xf32>) -> tensor<16xf32>
      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 3, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.4K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op_weight_only.mlir

        %1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<6xf32>) -> tensor<*xf32>
        func.return %1: tensor<*xf32>
      }
      func.func private @composite_depthwise_conv2d_fn(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.7K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op.mlir

        %1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<6xf32>) -> tensor<*xf32>
        func.return %1: tensor<*xf32>
      }
      func.func private @composite_depthwise_conv2d_fn(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3K bytes
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/fold-constants-to-subgraph.mlir

      %2 = func.call @fold_all_test(%arg0, %0, %1) : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x30x30x16xf32>
      func.return %2 : tensor<256x30x30x16xf32>
    }
    
    // ALL-LABEL: @fold_all_test
    func.func @fold_all_test(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>) -> tensor<256x30x30x16xf32> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 10.5K bytes