Results 41 - 50 of 111 for PartitionedCall (0.25 sec)
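
Every hit below contains the tf.PartitionedCall op. For orientation, here is a minimal sketch of the form the op takes in these test files, assembled from the attributes that recur in the snippets (config, config_proto, executor_type, f); the callee name @callee is a placeholder for illustration, not taken from any result:

    func.func @caller(%arg0: tensor<*xf32>) -> tensor<*xf32> {
      // Call @callee indirectly through tf.PartitionedCall; the f attribute
      // names the callee, the three string attributes are typically empty.
      %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<*xf32>) -> tensor<*xf32>
      func.return %0 : tensor<*xf32>
    }
    func.func @callee(%arg0: tensor<*xf32>) -> tensor<*xf32> {
      func.return %arg0 : tensor<*xf32>
    }
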

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir

        %4 = "quantfork.stats"(%3) {
          layerStats = dense<[-1.0, 0.8]> : tensor<2xf32>
        } : (tensor<*xf32>) -> tensor<*xf32>
        %5 = "tf.PartitionedCall"(%4, %cst_1, %cst_2) {
          _tfl_quant_trait = "fully_quantizable", config = "", config_proto = "",
          executor_type = "", f = @composite_matmul_with_bias_fn_1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 9.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

    func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir

          attributes {tf_quant.quantized_ops = ${quantized_ops}} {
    
        %input_scale, %input_zp = "tf.PartitionedCall"(%input) {
            config = "", config_proto = "", executor_type = "", f=@internal_calculate_quant_params
          } : (tensor<*xf32>) -> (tensor<*xf32>, tensor<*xi32>)
    
        %quantized_input = "tf.PartitionedCall"(%input, %input_scale, %input_zp) {
            config = "", config_proto = "", executor_type = "", f=@internal_quantize_i8
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 12.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir

        %0 = "quantfork.stats"(%arg0) {layerStats = dense<[1.27501142, 149.824783]> : tensor<2xf32>} : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 4.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir

          attributes {tf_quant.quantized_ops = ${quantized_ops}} {
    
        %accum_out = "tf.PartitionedCall"(%input, %weight) {
            config = "", config_proto = "", executor_type = "", f=@${internal_func_name}
          } : (tensor<*xf32>, tensor<*xi8>) -> tensor<*xf32>
    
        %out = "tf.PartitionedCall"(%accum_out, %weight_scale) {
            config = "", config_proto = "", executor_type = "", f=@internal_dequantize_f32
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tfrt/tests/mlrt/async_while.mlir

    // CHECK-NEXT:    %1 = "tf.AddV2"(%0, %cst) : (tensor<i32>, tensor<i32>) -> tensor<i32>
    // CHECK-NEXT:    "tf_mlrt.tf_promise"(%arg2, %1) : (!mlrt.promise, tensor<i32>) -> ()
    // CHECK-NEXT:    %2 = "tf.PartitionedCall"(%1, %arg5) <{config = "", config_proto = "", executor_type = "", f = @"map/while_cond/TfMlrtAsyncWhilePredicate"}> : (tensor<i32>, tensor<i32>) -> tensor<i1>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 22.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir

        %cst = "tf.Const"() {device = "", value = dense<[[[[-0.315365672, 0.27481091], [0.0901821703, -0.382271349], [-0.105572946, -0.354302853]], [[-0.47703138, -0.307006568],...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/function-control-ret.mlir

    module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 29 : i32}} {
      func.func @main() {
        tf_executor.graph {
          %0 = tf_executor.island wraps "tf.PartitionedCall"() {Tin = [], Tout = [], config = "", config_proto = "", device = "", executor_type = "", f = @foo, name = "Call_foo"} : () -> ()
          tf_executor.fetch
        }
        func.return
      }
      func.func @foo() {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 1004 bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/tf_to_hlo_pipeline/sccp-post-shape-inference.mlir

        %1 = "tf.Reshape"(%arg1, %0) : (tensor<19x10xf32>, tensor<2xi64>) -> tensor<?x?xf32>
    
        // CHECK: %[[RESULT:.*]] = mhlo.constant dense<[10, 19]>
        %2 = "tf.PartitionedCall"(%1) {config = "", config_proto = "", executor_type = "", f = @get_shape} : (tensor<?x?xf32>) -> (tensor<?xi64>)
    
        // CHECK: return %[[RESULT]]
        func.return %2 : tensor<?xi64>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jul 25 02:54:34 UTC 2023
    - 1020 bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/prepare_tpu_computation_for_tf_export.mlir

      // CHECK: _xla_original_oc_node_name = [[NODE_NAME1:.*]], _xla_token_input_nodes = ["_xla_token_arg_node"]
      %0 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @PartitionedCall3} : (tensor<i32>) -> (tensor<i32>)
      // CHECK-NOT: _xla_token_input_nodes
      %1 = "tf.PartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @IdentityFunc} : (tensor<i32>) -> (tensor<i32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 18:46:36 UTC 2024
    - 9.2K bytes
    - Viewed (0)