Results 91 - 100 of 150 for 20xf32 (0.19 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        return %0 : tensor<2x2xf32>
      }
      func.func private @composite_matmul_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) {attr_map = "0:transpose_a,1:transpose_b", device = "", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
        return %0 : tensor<2x2xf32>
      }
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // PerChannel: %[[out_2:.*]] = "tf.PartitionedCall"(%arg0, %[[q_w]], %[[scale]], %[[zp]]) <{config = "", config_proto = "", executor_type = "",
    // PerChannel-SAME: f = @quantized_conv2d_fn_0}> : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xi8>, tensor<2xf32>, tensor<2xi32>) -> tensor<*xf32>
    // PerChannel: return %[[out_1]], %[[out_2]]
    
    }
    
    // -----
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
  3. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        %26 = "tf.Cast"(%25) {Truncate = false} : (tensor<2xi64>) -> tensor<2xf32>
        %27 = "tf.Equal"(%14, %26) {incompatible_shape_error = true} : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xi1>
        %28 = "tf.Cast"(%27) {Truncate = false} : (tensor<2xi1>) -> tensor<2xf32>
        %29 = "tf.Sum"(%28, %6) {keep_dims = false} : (tensor<2xf32>, tensor<1xi32>) -> tensor<f32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

      %2 = "tf.Conv2D"(%1, %0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
      %3 = "tf.BiasAdd"(%2, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
  5. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %1:2 = "tfl.split"(%cst, %0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2xf32>, tensor<2xf32>)
      %2 = "tfl.quantize"(%1#0) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
      %3 = "tfl.quantize"(%1#1) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  6. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

    // CHECK: return %[[cast_i32]] : !tfr.tensor
    }
    
    // CHECK-LABEL: decompose_output_type
    func.func @decompose_output_type(%arg0: tensor<2xf32>) -> tensor<2xi32> {
      %0 = "tf.CastFloat"(%arg0) : (tensor<2xf32>) -> tensor<2xi32>
      func.return %0: tensor<2xi32>
    // CHECK: %[[i32:.*]] = tfr.constant i32 -> !tfr.attr
    // CHECK: tfr.call @tf__cast(%[[casted_arg:.*]], %[[i32]], %false) : (!tfr.tensor, !tfr.attr, i1) -> !tfr.tensor
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/order_by_dialect.mlir

      %0 = mhlo.constant dense<0.000000e+00> : tensor<32x10xf32>
      %1 = mhlo.constant dense<0.000000e+00> : tensor<32x28x28x5xf32>
      %2 = "tf.ReadVariableOp"(%arg4) : (tensor<!tf_type.resource<tensor<10xf32>>>) -> tensor<10xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 7.6K bytes
  8. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.json

    // CHECK-DAG: %[[input_18:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-8.000000e-01, 1.600000e+00]> : tensor<2xf32>}> : (tensor<1x4xf32>) -> tensor<1x4xf32>
    // CHECK-DAG: %[[input_19:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-2.000000e+00, 4.000000e+00]> : tensor<2xf32>}> : (tensor<1x2xf32>) -> tensor<1x2xf32>
    
    - Last Modified: Wed May 01 06:25:50 UTC 2024
    - 9.1K bytes
  9. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/partially_shaped_variables.py

        # CHECK: "tf_saved_model.global_tensor"() <{is_mutable, {{.*}} type = tensor<*xf32>, value = dense<0.000000e+00> : tensor<1xf32>}> {tf_saved_model.exported_names = ["v0"]} : () -> ()
        # CHECK: "tf_saved_model.global_tensor"() <{is_mutable, {{.*}} type = tensor<?xf32>, value = dense<[0.000000e+00, 1.000000e+00]> : tensor<2xf32>}> {tf_saved_model.exported_names = ["v1"]} : () -> ()
        self.v0 = tf.Variable([0.], shape=tf.TensorShape(None))
    - Last Modified: Tue Oct 31 08:49:35 UTC 2023
    - 1.5K bytes
  10. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-binary-elementwise.mlir

      %0 = "tf.RealDiv"(%arg0, %arg0) : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
      func.return %0: tensor<2xi32>
    }
    
    // CHECK-LABEL: func @sub
    func.func @sub(%arg0: tensor<2xi32>) -> tensor<2xi32> {
      // CHECK-NEXT:  %0 = mhlo.subtract %arg0, %arg0 : tensor<2xi32>
      // CHECK-NEXT:  return %0 : tensor<2xi32>
      %0 = "tf.Sub"(%arg0, %arg0) : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
      func.return %0: tensor<2xi32>
    }
    
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 18.4K bytes