Results 111 - 120 of 173 for 28xf32 (0.09 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/lower_variable_ops_to_ml_program.mlir

        %3 = "tf.Mul"(%1, %2) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
        return %3 : tensor<10xf32>
      }
    }
    
    // -----
    
    // CHECK-LABEL: module
    module attributes {tf_saved_model.semantics} {
      // CHECK: ml_program.global{{.*}}mutable{{.*}}@vars.v
      "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<10xf32>, value = dense<[0.,10.,2.,3.,4.,5.,6.,7.,8.,9.]> : tensor<10xf32> } : () -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Sep 19 19:00:41 UTC 2022
    - 6K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // PerChannel: %[[out_2:.*]] = "tf.PartitionedCall"(%arg0, %[[q_w]], %[[scale]], %[[zp]]) <{config = "", config_proto = "", executor_type = "",
    // PerChannel-SAME: f = @quantized_conv2d_fn_0}> : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xi8>, tensor<2xf32>, tensor<2xi32>) -> tensor<*xf32>
    // PerChannel: return %[[out_1]], %[[out_2]]
    
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
  3. tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir

      func.return %0 : tensor<*xf32>
    }
    
    func.func private @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
      %0 = "tf.Add"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
      func.return %0 : tensor<*xf32>
    }
    
    func.func private @mul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
      %0 = "tf.Multiply"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 30 10:34:48 UTC 2022
    - 8.4K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        %26 = "tf.Cast"(%25) {Truncate = false} : (tensor<2xi64>) -> tensor<2xf32>
        %27 = "tf.Equal"(%14, %26) {incompatible_shape_error = true} : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xi1>
        %28 = "tf.Cast"(%27) {Truncate = false} : (tensor<2xi1>) -> tensor<2xf32>
        %29 = "tf.Sum"(%28, %6) {keep_dims = false} : (tensor<2xf32>, tensor<1xi32>) -> tensor<f32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc

    module {
      func.func @main(%arg0: tensor<10xf32>) -> tensor<10xf32> {
        return %arg0 : tensor<10xf32>
      }
      func.func @func1(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -> tensor<10xf32> {
        %0 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
        %1 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
        return %0 : tensor<10xf32>
      }
    })mlir";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:17:14 UTC 2024
    - 6.4K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

      %2 = "tf.Conv2D"(%1, %0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
      %3 = "tf.BiasAdd"(%2, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
  7. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %1:2 = "tfl.split"(%cst, %0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2xf32>, tensor<2xf32>)
      %2 = "tfl.quantize"(%1#0) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
      %3 = "tfl.quantize"(%1#1) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  8. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

    // CHECK: return %[[cast_i32]] : !tfr.tensor
    }
    
    // CHECK-LABEL: decompose_output_type
    func.func @decompose_output_type(%arg0: tensor<2xf32>) -> tensor<2xi32> {
      %0 = "tf.CastFloat"(%arg0) : (tensor<2xf32>) -> tensor<2xi32>
      func.return %0: tensor<2xi32>
    // CHECK: %[[i32:.*]] = tfr.constant i32 -> !tfr.attr
    // CHECK: tfr.call @tf__cast(%[[casted_arg:.*]], %[[i32]], %false) : (!tfr.tensor, !tfr.attr, i1) -> !tfr.tensor
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
  9. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/func_list_attr.mlir

    }
    
    // CHECK-DAG: name: "bar"
    func.func @bar() -> tensor<10xf32> {
      %0 = tf_executor.graph {
        %1:2 = tf_executor.island wraps "tf.Const"() {device = "", dtype = "tfdtype$DT_FLOAT", value = dense<2.000000e+00> : tensor<10xf32>} : () -> tensor<10xf32> loc("const_2")
        tf_executor.fetch %1#0 : tensor<10xf32>
      }
      func.return %0 : tensor<10xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 2.2K bytes
  10. tensorflow/compiler/mlir/tensorflow/tests/strip_tf_attributes.mlir

                                 %arg2: tensor<5xf32> {tf._user_specified_name = "b1"},
                                 %arg3: tensor<3920x10xf32> {tf._user_specified_name = "w2"},
                                 %arg4: tensor<10xf32> {tf._user_specified_name = "b2"}) -> tensor<10xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 25 20:04:10 UTC 2022
    - 1.5K bytes