Results 1 - 10 of 16 for composite_conv2d_fn (0.37 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.td

    def LiftConv : Pat<
      (TF_Conv2DOp:$res $input, $filter, $strides, $use_cudnn_on_gpu, $padding,
        $explicit_paddings, IsDataFormatNHWC:$data_format, $dilations),
      (LiftAsTFPartitionedCall<"composite_conv2d_fn">
        (ArgumentList $input, $filter),
        (ResultList $res),
        (NamedAttributeList
          (NamedAttr<"strides"> $strides),
          (NamedAttr<"use_cudnn_on_gpu"> $use_cudnn_on_gpu),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 3.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td

    def LiftConv : Pat<
      (TF_Conv2DOp:$res $input, $filter, $strides, $use_cudnn_on_gpu, $padding,
        $explicit_paddings, IsDataFormatNHWC:$data_format, $dilations),
      (LiftAsTFPartitionedCall<"composite_conv2d_fn">
        (ArgumentList $input, $filter),
        (ResultList $res),
        (NamedAttributeList
          (NamedAttr<"strides"> $strides),
          (NamedAttr<"use_cudnn_on_gpu"> $use_cudnn_on_gpu),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 15.6K bytes
    - Viewed (0)
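    Note: the LiftConv pattern shown in results 1 and 2 rewrites a matched tf.Conv2D into a tf.PartitionedCall whose callee is an outlined @composite_conv2d_fn_* function, which later quantization passes then operate on (as in results 3 and 5 below). A minimal sketch of the lifted form, assuming hypothetical tensor shapes, simplified Conv2D attributes, and the tf_quant.composite_function attribute name (all assumptions, not copied from these files):

      func.func @main(%arg0: tensor<1x2x2x3xf32>) -> tensor<*xf32> {
        // Weight constant feeding the lifted call (hypothetical value and shape).
        %weight = "tf.Const"() {value = dense<3.000000e-01> : tensor<2x3x3x2xf32>} : () -> tensor<2x3x3x2xf32>
        // The original tf.Conv2D is replaced by a call to the composite function,
        // tagged as fully quantizable, matching the snippets in results 3 and 5.
        %0 = "tf.PartitionedCall"(%arg0, %weight) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        func.return %0 : tensor<*xf32>
      }

      // Outlined body holding the original convolution.
      func.func private @composite_conv2d_fn_1(%arg0: tensor<1x2x2x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.Conv2D"(%arg0, %arg1) {data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        func.return %0 : tensor<*xf32>
      }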
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

        %2 = "tf.PartitionedCall"(%arg0, %weight) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_fn_2} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        func.return %1, %2 : tensor<*xf32>, tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

        %5 = "tf.PartitionedCall"(%4, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_fn_1} : (tensor<1x3x2x2xf32>, tensor<2x3x2x2xf32>) -> tensor<1x3x2x2xf32>
        %6 = "quantfork.qcast"(%5) : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2x!quant.uniform<i8:f32, 0.0054901962186775953:-19>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

        %2 = "tf.PartitionedCall"(%arg0, %weight) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_fn_2} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        func.return %1, %2 : tensor<*xf32>, tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    // CHECK-SAME: f = @composite_conv2d_fn_2}>
    // CHECK-SAME: {_tfl_quant_trait = "fully_quantizable"
    // CHECK: %[[BIASADD_0:.*]] = "tf.BiasAdd"(%[[PARTITIONEDCALL_0]], %[[CONST_0]])
    // CHECK: %[[RELU6_0:.*]] = "tf.Relu6"(%[[BIASADD_0]])
    // CHECK: %[[PARTITIONEDCALL_1:.*]] = "tf.PartitionedCall"(%arg0, %[[CONST_1]])
    // CHECK-SAME: f = @composite_conv2d_fn_1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
    - Viewed (0)
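    Note: the .mlir files in result 6 are lit/FileCheck tests; each one begins with a RUN line that pipes the file through the pass under test and verifies the output against the CHECK / CHECK-SAME directives shown above. A minimal sketch of such a RUN line, assuming the tf-quant-opt tool name and the pass flag spelling (both are assumptions, not copied from the file):

      // RUN: tf-quant-opt %s -quant-lift-quantizable-spots-as-functions-drq | FileCheck %s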
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

        return %1 : tensor<1x3x4x2xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

      %cst = "tf.Const"() {value = dense<3.000000e-01> : tensor<2x3x3x2xf32>} : () -> tensor<2x3x3x2xf32>
      %0 = "tf.XlaCallModule"(%arg0, %cst) {
        Sout = [#tf_type.shape<1x2x2x2>], _entry_function = @composite_conv_fn,
        _original_entry_function = "composite_conv_fn",
        _stablehlo_module_attrs = {}, _quantization_method = "weight_only_ptq { }",
        device = "", dim_args_spec = [], disabled_checks = [],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_drq.mlir

        %2 = "tf.BiasAdd"(%1, %cst_0) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
        func.return %2: tensor<*xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_drq_per_channel.mlir

        %2 = "tf.BiasAdd"(%1, %cst_0) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
        func.return %2: tensor<*xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.8K bytes
    - Viewed (0)