- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 78 for Convolution (0.28 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/order_by_dialect.mlir
%4 = "tf.ReadVariableOp"(%arg1) : (tensor<!tf_type.resource<tensor<3x3x1x5xf32>>>) -> tensor<3x3x1x5xf32> %5 = "tf.ReadVariableOp"(%arg3) : (tensor<!tf_type.resource<tensor<3920x10xf32>>>) -> tensor<3920x10xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 7.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
// %7 = stablehlo.convolution(%4, %6) // q1 * q2 (disguised in f32). // %8 = stablehlo.reshape %2 // z1 // %9 = stablehlo.broadcast_in_dim %8 // %10 = stablehlo.convert %9 // i8 -> f32 cast trick for z1. // %11 = stablehlo.convert %5 // i8 -> f32 cast trick for filter. // %12 = stablehlo.convolution(%10, %11) // q2 * z1 // %13 = stablehlo.subtract %7, %12 // q1 * q2 - q2 * z1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
%out_scale : tensor<*xf32>, %out_zp : tensor<*xi32>) -> tensor<*x${output_type}> attributes {tf_quant.quantized_ops = ${quantized_ops}} { // Given the convolution takes 2 qint8 inputs and output a qint32. // The accumulation scale is (input_scale * filter_scale). // The accumulation zero point is 0.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
// IMPORTANT: In the following Conv2D tests tensor shapes do not match // convolution parameters (stride, dilations, etc...). This test only verifies // that changing convolution data layout will update all the attributes. // CHECK-LABEL: func @transposeConv2D
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
] ) def depthwise_conv( self, input_tensor: core.Tensor ) -> Mapping[str, core.Tensor]: """Performs a 2D depthwise convolution operation. Args: input_tensor: Input tensor to perform convolution on. Returns: A map of: output key -> output result. """ scale = [1.0] * self.out_channel_size
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir
%0 = stablehlo.convolution(%arg0, %arg1) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[0, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x4x2xf32> return %0 : tensor<1x3x4x2xf32> } // CHECK: func private @composite_conv_fn // CHECK: %[[CONV:.+]] = stablehlo.convolution // CHECK: return %[[CONV]] }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 22K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h
std::unique_ptr<Pass> createUnfuseBatchNormPass(); // Creates a pass which constant folds broadcast_in_dim op conditionally. std::unique_ptr<Pass> createFoldBroadcastPass(); // Creates a pass which fuses MHLO binary element-wise ops and convolution op. std::unique_ptr<Pass> createFuseConvolutionPass(); // Creates a pass which applies various optimizations on MHLO IR. std::unique_ptr<Pass> createOptimizePass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op_weight_only.mlir
module { // For XLA weight-only per-channel depthwise convolution, tensor shape should have // transformed from [H,W,C,M] to [H,W,1,CxM], func.func @depthwise_conv(%arg0: tensor<1x3x4x3xf32>) -> (tensor<*xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h
// Processes tensors with NCHW format (== (batch, channel, height, width)) by // converting them to NHWC formats along with extra optimizations such as // constant folding the transpose->convolution pattern. This is useful when // downstream pipeline (e.g. XLA) is more optimized when accepting NHWC formats. void AddProcessNchwTensorPasses(OpPassManager& pm);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// usually has the following pattern. In the example below, // the input operand would be stablehlo.convolution op, and return value would // be stablehlo.add op. // // ``` // %0 = stablehlo.constant dense<3> // %1 = stablehlo.constant dense<4> // %2 = stablehlo.constant dense<2> // %3 = stablehlo.convolution(%%arg0, %%arg1) : // (tensor<?x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<?x3x4x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0)