Results 1 - 10 of 102 for conv3 (0.17 sec)
tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir
// CHECK: %[[conv3d:.*]] = "tfl.conv_3d"(%arg0, %[[w]], %[[const]]) <{dilation_d_factor = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_d = 1 : i32, stride_h = 1 : i32, stride_w = 1 : i32}> : (tensor<?x28x28x28x8xf32>, tensor<3x3x3x8x16xf32>, none) -> tensor<?x26x26x26x16xf32>
// CHECK: %2 = "tfl.shape"(%[[conv3d]]) : (tensor<?x26x26x26x16xf32>) -> tensor<5xi64>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 38.2K bytes
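The shape arithmetic in this check follows the VALID-padding rule; a minimal sketch in Python (my illustration, not part of the test):

    # VALID padding: out = floor((in - kernel) / stride) + 1
    def valid_out(in_size, kernel, stride=1):
        return (in_size - kernel) // stride + 1

    # Each spatial dim 28 with a 3x3x3 kernel at stride 1 gives 26, matching
    # tensor<?x28x28x28x8xf32> -> tensor<?x26x26x26x16xf32> (16 is the output-channel count).
    assert valid_out(28, 3, 1) == 26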
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
[(HasRankOf<1> $add_rhs_value),
 (HasEqualElementSize<[-1], [0]> $conv_out, $add_rhs)],
[], (addBenefit -1)>;

// Convert conv+sub+mul pattern to conv+mul+add.
// (conv - sub) * mul -> conv * mul + (-sub) * mul
//
// This is needed to support Conv+BatchNorm pattern from Jax models converted
// using jax2tf w/o native serialization. Note that Jax2tf patterns always
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 8.4K bytes
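The rewrite described in the comment is plain distributivity, so numerics are preserved up to float rounding; a minimal NumPy check, with made-up constants standing in for the folded BatchNorm parameters:

    import numpy as np

    conv = np.random.randn(1, 5, 5, 3).astype(np.float32)  # stand-in conv output
    sub, mul = np.float32(0.25), np.float32(1.5)            # hypothetical BatchNorm constants
    assert np.allclose((conv - sub) * mul,                  # conv+sub+mul form
                       conv * mul + (-sub * mul))           # conv+mul+add form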
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
%b2 = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
%conv = "tfl.conv_2d"(%0, %w, %b) { dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32 } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
%conv2 = "tfl.conv_2d"(%0, %w, %b2) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
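For context on the constant values spanning four orders of magnitude (1.0e-2 up to ~2.1e2): signed symmetric int8 quantization typically derives a scale of max|x| / 127 per channel, so the channel scales for this constant differ wildly, which is what the test exercises. A rough sketch of that scale rule (an assumption about the convention, not code from the pass):

    import numpy as np

    b2 = np.array([1.0e-2, 2.1473647e1, -2.1473647e2], dtype=np.float32)
    print(np.abs(b2) / 127)  # one symmetric int8 scale per channel; values span ~8e-5 to ~1.7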
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
%conv1 = "tfl.conv_2d"(%1, %2, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes
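The 224 -> 112 spatial shapes above follow the SAME-padding rule, which depends only on input size and stride; a minimal sketch:

    # SAME padding: out = ceil(in / stride), independent of kernel size
    def same_out(in_size, stride):
        return -(-in_size // stride)  # ceiling division

    # 224 at stride 2 gives 112, matching
    # tensor<1x224x224x3xf32> -> tensor<1x112x112x32xf32> (32 is the output-channel count).
    assert same_out(224, 2) == 112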
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir
%zp_offset: tensor<?x2x2x1xi32>, %bias: tensor<1xi32>
) -> tensor<?x2x2x1xi32> {
  // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
  // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[zp_offset:.*]], %[[bias:.*]]
  // CHECK-DAG: %[[result:.*]] = chlo.broadcast_add %[[conv]], %[[combined]]
  // CHECK: return %[[result]]
  %0 = mhlo.convolution(%lhs, %rhs) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 24 02:26:47 UTC 2024 - 10.7K bytes
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/optimize.td
def IsNull : Constraint<CPred<"!$0">>;

// This pattern optimizes:
//   conv/dot_general + a + b -> conv/dot_general + (a + b)
//   conv/dot_general - a - b -> conv/dot_general - (a + b)
//   conv/dot_general + a - b -> conv/dot_general + (a - b)
//   conv/dot_general - a + b -> conv/dot_general - (a - b)
foreach OpsTuple = [
  [CHLO_BroadcastAddOp, CHLO_BroadcastAddOp, CHLO_BroadcastAddOp],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 24 02:26:47 UTC 2024 - 2K bytes
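All four rewrites in the comment are plain reassociation; the payoff is that a and b are constants, so (a + b) or (a - b) folds at compile time and only one add/sub remains on the conv/dot_general output. A minimal NumPy check (illustration only):

    import numpy as np

    conv = np.random.randn(2, 2).astype(np.float32)  # stand-in conv/dot_general output
    a, b = np.float32(3.0), np.float32(5.0)          # foldable constants
    assert np.allclose(conv + a + b, conv + (a + b))
    assert np.allclose(conv - a - b, conv - (a + b))
    assert np.allclose(conv + a - b, conv + (a - b))
    assert np.allclose(conv - a + b, conv - (a - b))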
src/cmd/compile/internal/walk/builtin.go
	return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
}
if isByteCount(n) {
	conv := n.X.(*ir.ConvExpr)
	walkStmtList(conv.Init())
	init.Append(ir.TakeInit(conv)...)
	_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
	return len
}
if isChanLenCap(n) {
	name := "chanlen"
	if n.Op() == ir.OCAP {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 08 22:35:22 UTC 2024 - 31.2K bytes
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-tfl-stablehlo-conv.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 24 06:08:43 UTC 2024 - 1.6K bytes
tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir
%b = arith.constant dense<-1.23697901> : tensor<64xf32>
%conv = "tfl.conv_2d"(%arg0, %w, %b) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3xf32>, tensor<64x3x3x3xf32>, tensor<64xf32>) -> tensor<1x112x112x64xf32>
func.return %conv : tensor<1x112x112x64xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 23 21:09:00 UTC 2024 - 23.2K bytes
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
// CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[TRANSPOSE_0]], %[[CONST]]) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = {{\[\[}}1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
// CHECK: %[[TRANSPOSE_1:.+]] = stablehlo.transpose %[[CONV]], dims = [0, 3, 1, 2] : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes
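The dims = [0, 3, 1, 2] attribute in the second check is the NHWC -> NCHW permutation; a minimal NumPy check of the shapes it produces (illustration only; np.transpose uses the same axis-permutation convention as stablehlo.transpose):

    import numpy as np

    x = np.zeros((1, 4, 4, 8), dtype=np.float32)  # NHWC result of the convolution
    y = np.transpose(x, (0, 3, 1, 2))             # dims = [0, 3, 1, 2]
    assert y.shape == (1, 8, 4, 4)                # NCHW, matching tensor<1x8x4x4xf32>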