Results 11 - 20 of 25 for 2x2xf64 (0.11 sec)

  1. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

              %outputs_9, %control_10 = tf_executor.island wraps "tf.XlaSharding"(%outputs_7) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
              tf_executor.fetch %outputs_9 : tensor<2x2xf32>
            }
            return %0 : tensor<2x2xf32>
          }
        }
      )";
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir

    // Validate that epsilon is properly promoted to f64
    // CHECK-DAG: %[[EPS:.+]] = mhlo.constant dense<1.000000e+00> : tensor<256xf64>
    func.func @batchNormInference_f64(
        %x: tensor<4x256xf64>, %scale: tensor<256xf64>, %offset: tensor<256xf64>,
        %mean: tensor<256xf64>, %variance: tensor<256xf64>)
        -> (tensor<4x256xf64>) {
      %0 = "mhlo.batch_norm_inference"(%x, %scale, %offset, %mean, %variance)
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 10.4K bytes
  3. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
          return %2 : tensor<?x2xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

    func.func @uniform_quantize_and_dequantize_per_axis(%arg0 : tensor<2x2xf32>) -> tensor<2x2xf32> {
      %scales = "tf.Const"() { value = dense<[1.0, 2.0]> : tensor<2xf32> } : () -> tensor<2xf32>
      %zps = "tf.Const"() { value = dense<[3, 4]> : tensor<2xi32> } : () -> tensor<2xi32>
    
      // CHECK: %[[QUANTIZE:.*]] = mhlo.uniform_quantize %arg0 : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<i8:f32:0, {1.000000e+00:3,2.000000e+00:4}>>
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
  5. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    func.func @NotQuantizeFloatConst() -> tensor<2x2xf32> {
      %0 = arith.constant dense<-0.1> : tensor<2x2xf32>
      %1 = "tfl.quantize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>
      %2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> tensor<2x2xf32>
      func.return %2 : tensor<2x2xf32>
    
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  6. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %1 = "tfl.add"(%cst_2, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2x2xf32>, tensor<  2x2xf32>) -> tensor<2x2x2xf32>
      %2 = "tfl.add"(%cst_0, %cst_2) {fused_activation_function = "NONE"} : (tensor<    2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
    
      func.return %0, %1, %2 : tensor<2x2xf32>, tensor<2x2x2xf32>, tensor<2x2x2xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

        "mhlo.return"(%10, %15) : (tensor<f32>, tensor<i32>) -> ()
      }) {dimensions = dense<2> : tensor<1xi64>} : (tensor<2x2x4xf32>, tensor<2x2x4xi32>, tensor<f32>, tensor<i32>) -> (tensor<2x2xf32>, tensor<2x2xi32>)
      func.return %4#0, %4#1 : tensor<2x2xf32>, tensor<2x2xi32>
    
      // CHECK-DAG:  %0 = mhlo.constant dense<0xFF800000> : tensor<f32>
      // CHECK-DAG:  %1 = mhlo.constant dense<0> : tensor<i32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<2x2xf32>} : () -> tensor<2x2xf32>
      %0 = "tf.Conv2D"(%arg0, %cst) {data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
      %1 = "tf.Mul"(%0, %cst_0) : (tensor<1x3x2x2xf32>, tensor<2x2xf32>) -> tensor<1x3x2x2xf32>
      func.return %1 : tensor<1x3x2x2xf32>
    }
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir

        %7 = stablehlo.slice %6 [1:3, 2:4] : (tensor<3x4xf32>) -> tensor<2x2xf32>
        %8 = "quantfork.qcast"(%7) {volatile} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
        %9 = "quantfork.dcast"(%8) : (tensor<2x2x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<2x2xf32>
        return %9 : tensor<2x2xf32>
      }
    
      // CHECK: quantized_dot_general_fn_1
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 35.4K bytes
  10. tensorflow/compiler/mlir/lite/tests/lower-static-tensor-list.mlir

    // CHECK: [[UNPACK:%.*]]:3 = "tf.Unpack"(%arg0) <{axis = 0 : i64}> : (tensor<3x2x2xf32>) -> (tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>)
    // CHECK: [[SCALAR_ZERO:%.*]] = arith.constant dense<0> : tensor<i32>
    // CHECK: [[CONCAT:%.*]] = "tf.Concat"([[SCALAR_ZERO]], [[UNPACK]]#0, [[UNPACK]]#1, [[UNPACK]]#2) : (tensor<i32>, tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
    // CHECK: [[LENGTHS:%.*]] = arith.constant dense<0> : tensor<0xi64>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 39.9K bytes
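
Several of the hits above (results 4, 5, and 9) use the !quant.uniform type from MLIR's quant dialect, which encodes an affine mapping: real_value is approximately scale * (stored_value - zero_point). The sketch below is illustrative only and is not taken from any of the indexed files; its argument types are copied verbatim from the excerpts to show the two forms that appear there.

    // Illustrative sketch, not from the indexed files.
    // Per-tensor form: one scale and one zero point for the whole tensor.
    // Per-axis form (i8:f32:0): one scale:zero_point pair per slice along axis 0.
    func.func @quant_uniform_type_examples(
        %per_tensor: tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>,
        %per_axis: tensor<2x2x!quant.uniform<i8:f32:0, {1.000000e+00:3,2.000000e+00:4}>>) {
      func.return
    }

For the per-tensor type in result 5, dequantizing a stored byte v therefore gives roughly 7.8431372549019615E-4 * (v - 128).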