Results 111 - 120 of 173 for conv_3d (0.5 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir

        %5 = "tf.MatMul"(%1, %3) {
          attr_map = "transpose_a:0,transpose_b:1"
        } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32>
        func.return %5 : tensor<*xi32>
      }
    
      // Conv2D with int32 accumulation
      func.func private @internal_conv2d_fn(
                             %input : tensor<*xi8>, %filter : tensor<*xi8>,
                             %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 12.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

          return false;
        }
        return true;
      }
    };
    std::unique_ptr<TargetHardwareOperation> CreateConcatOp() {
      return std::make_unique<GpuConcatOp>();
    }
    
    // Currently used for these ops:
    // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected
    class GpuConvOp : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override {
        int64_t arithmetic_count;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

      }
    
      func.func private @composite_conv2d_fn_1(%arg0: tensor<1x2x2x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td

                  (UpdateShapeWithAxis<-1> $qtype, $old_value))),
              [(CanUpdateShapeWithAxis<-1> $qtype, $old_value)]>;
    
    // The axis is set to 0 because the transpose is from the legalization of
    // tf.conv2d and the new channel axis is the first dimension.
    def ReorderTransposeDequantQuantUsedByConv :
          Pat<(TF_TransposeOp:$old_value
                  (TFL_DequantizeOp (TFL_QuantizeOp $input, $qtype)), $perm),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 10.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

      }
    
      func.func private @composite_conv2d_fn_1(%arg0: tensor<1x2x2x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h

          if (!input_type || !input_type.hasStaticShape()) {
            return false;
          }
          total_count += input_type.getNumElements();
        }
        *count = total_count;
        return true;
      }
    
      // For conv2d/depthwise_conv/fully_connected ops.
      // This algorithm actually comes from TOCO tooling_util.cc
      static bool GetArithmeticCountForConvAndFullyconnectedOp(mlir::Operation* op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          def body(self, x, w):
            z = nn_ops.conv2d(x, w, padding='SAME')
            return z, w
    
          @def_function.function(
              input_signature=[
                  tensor_spec.TensorSpec(
                      shape=input_shape, dtype=dtypes.float32, name='input_tensor'
                  )
              ]
          )
          def main(self, x):
            x1 = nn_ops.conv2d(x, self.w, padding='SAME')
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir

                             %input : tensor<*xf32>, %weight : tensor<*x!tf_type.qint8>,
                             %weight_scale : tensor<*xf32>, %weight_zp : tensor<*xi32>) -> tensor<*xf32>
          attributes {tf_quant.quantized_ops = ["Conv2D"]} {
    
        %out = "tf.UniformQuantizedConvolutionHybrid"(%input, %weight,
                               %weight_scale, %weight_zp) {
            Tlhs = "tfdtype$DT_FLOAT",
            Trhs = "tfdtype$DT_QINT8",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 01 12:06:54 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

        if ((quantization_method_ == tensorflow::quantization::QuantizationMethod::
                                         METHOD_DYNAMIC_RANGE_INT8) &&
            (function_name.contains("batch_matmul") ||
             function_name.contains("conv3d"))) {
          call_op->removeAttr(kQuantTraitAttrName);
        }
    
        // TODO(b/270906404): Support weight-only gather for uniform quantized opset
        // in PTQ mode
        if (target_opset_ == OpSet::UNIFORM_QUANTIZED &&
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

      %1 = "tf.FakeQuantWithMinMaxArgs"(%arg0) {device = "", max = 2.000000e-01 : f32, min = -1.000000e-01 : f32, narrow_range = false, num_bits = 8 : i64} : (tensor<1x3x4x3xf32>) -> tensor<*xf32>
      %2 = "tf.Conv2D"(%1, %0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
    - Viewed (0)
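
Results 2 and 6 both lean on an arithmetic count for convolution ops: gpu_hardware.cc prices an op by its arithmetic count, and arithmetic_count_util.h computes that count with a formula credited to TOCO's tooling_util.cc. Below is a minimal Python sketch of the usual MAC-based formula; the function name and the multiply-plus-add-per-MAC convention are assumptions for illustration, not code copied from either file.

    def conv2d_arithmetic_count(filter_shape, output_shape):
        # filter_shape: (filter_h, filter_w, in_channels, out_channels)
        # output_shape: (batch, out_h, out_w, out_channels), NHWC
        filter_h, filter_w, in_channels, _ = filter_shape
        batch, out_h, out_w, out_channels = output_shape
        # Every output element consumes filter_h * filter_w * in_channels
        # multiply-accumulates; count one multiply and one add for each.
        macs = batch * out_h * out_w * out_channels * filter_h * filter_w * in_channels
        return 2 * macs

    # For the 1x2x2x3 input and 2x3x3x2 filter seen in results 3 and 5,
    # SAME padding with unit strides gives a 1x2x2x2 output:
    print(conv2d_arithmetic_count((2, 3, 3, 2), (1, 2, 2, 2)))  # 288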
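
Result 7's test base builds tiny models around nn_ops.conv2d. A sketch of the same call through the public tf.nn.conv2d API follows; the tensor shapes mirror the composite_conv2d_fn_1 signature from results 3 and 5, and the explicit unit strides are an assumption (the excerpt relies on defaults).

    import tensorflow as tf

    # NHWC input and HWIO filter matching tensor<1x2x2x3xf32> / tensor<2x3x3x2xf32>.
    x = tf.random.normal([1, 2, 2, 3])
    w = tf.random.normal([2, 3, 3, 2])

    # SAME padding keeps the 2x2 spatial extent; the filter supplies 2 output channels.
    y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    print(y.shape)  # (1, 2, 2, 2)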
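
Result 9 shows the dynamic-range pass (METHOD_DYNAMIC_RANGE_INT8) stripping the quantization trait from batch_matmul and conv3d composites. That pass lives in the TF quantizer, but dynamic-range quantization is most commonly reached through the TFLite converter; the sketch below shows that converter route only as a loose user-level analogue, with the saved-model path as a placeholder.

    import tensorflow as tf

    # Placeholder path; point this at a real SavedModel directory.
    converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/conv_model')
    # With no representative dataset, Optimize.DEFAULT yields dynamic-range
    # quantization (int8 weights, float activations).
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()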
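
Result 10's fake-quant flow inserts tf.FakeQuantWithMinMaxArgs in front of tf.Conv2D. The eager-mode equivalent below reuses the min/max, stride, and input shape from the test IR; the filter shape is an assumption, since the excerpt only shows it as an unranked tensor.

    import tensorflow as tf

    x = tf.random.normal([1, 3, 4, 3])   # matches tensor<1x3x4x3xf32>
    w = tf.random.normal([2, 3, 3, 2])   # assumed filter shape
    # Same quantization range as the IR: min = -0.1, max = 0.2, 8 bits.
    xq = tf.quantization.fake_quant_with_min_max_args(
        x, min=-0.1, max=0.2, num_bits=8, narrow_range=False)
    y = tf.nn.conv2d(xq, w, strides=[1, 1, 2, 1], padding='SAME')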