- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 125 for CONV (0.04 sec)
-
tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir
// CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]]) // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]]) // CHECK: return %[[CONV]] } // CHECK-LABEL: perChannelFakeQuantWithConv2D func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 20.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/dilated_conv.h
// dilation rate. // TF python library will rewrite dilated conv to // "SpaceToBatch->Conv->BatchToSpace" pattern, and the Conv in the middle // always has 'VALID' padding. The padding tensor in `SpaceToBatch` has two // parts of contributions, one is to reduce padding of CONV from 'SAME' to // 'VALID', and another is to make input shape multiples of dilation rate. The
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir
return %0 : tensor<1x3x4x2xf32> } // CHECK: func private @composite_conv_fn // CHECK: %[[CONV:.+]] = stablehlo.convolution // CHECK: return %[[CONV]] } // ----- // Test that q/dq pair with per-channel quantization parameter is inserted // between constant and XlaCallModule op with `weight_only_ptq` method of
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 22K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir
func.return %7 : tensor<1x112x112x32x!quant.uniform<u8:f32, 1.0>> // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %arg1, %arg2) // CHECK-SAME: -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.0078431372549019607:128>> // CHECK: %[[cst:.*]] = "tfl.pseudo_qconst"() // CHECK: %[[add:.*]] = tfl.add(%[[conv]], %[[cst]]) // CHECK-SAME: -> tensor<1x112x112x32x!quant.uniform<u8:f32, 1.000000e+00>> // CHECK: return %[[add]] }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 8.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir
// CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[ARG1]], %[[ARG2]]) // CHECK-SAME: (tensor<1x3x4x3xf32>, tensor<2x3x3x2x!quant.uniform<i8:f32, 6.000000e-03:-128>>) -> tensor<1x3x4x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 4.8K bytes - Viewed (0) -
test/typeparam/issue49027.dir/main.go
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "./a" "fmt" ) func main() { s := "foo" x := a.Conv(s) if x != s { panic(fmt.Sprintf("got %s wanted %s", x, s)) } y, ok := a.Conv2(s) if !ok { panic("conversion failed") } if y != s { panic(fmt.Sprintf("got %s wanted %s", y, s)) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 24 02:14:15 UTC 2022 - 617 bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_tpu_model_to_cpu.mlir
// CHECK: %[[cast:.*]] = "tf.Cast"(%[[cst]]) <{Truncate = false}> : (tensor<2x3x3x2xbf16>) -> tensor<2x3x3x2xf32> // CHECK: %[[conv:.*]] = "tf.Conv2D"(%[[ARG0]], %[[cast]]) // CHECK: %[[identity:.*]] = "tf.IdentityN"(%[[conv]]) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> // CHECK: return %[[identity]] : tensor<1x3x2x2xf32> // ----- // Tests that `tf.BatchFunction` is inlined.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
num_channels = 1 # Training parameters. learning_rate = 0.001 display_step = 10 batch_size = 32 # Network parameters. n_hidden_1 = 32 # 1st conv layer number of neurons. n_hidden_2 = 64 # 2nd conv layer number of neurons. n_hidden_3 = 64 # 1st fully connected layer of neurons. flatten_size = num_features // 16 * n_hidden_2 seed = 66478 class FloatModel(tf.Module):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 20 03:05:18 UTC 2021 - 6.5K bytes - Viewed (0) -
src/cmd/compile/internal/walk/assign.go
// if uint(newLen) <= uint(oldCap) nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nuint := typecheck.Conv(newLen, types.Types[types.TUINT]) scapuint := typecheck.Conv(oldCap, types.Types[types.TUINT]) nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, nuint, scapuint) nif.Likely = true // then { s = s[:newLen] }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:09:06 UTC 2024 - 20.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir
%arg2: tensor<256xf32>, // batch_norm args %arg3: tensor<7x7x3x64xf32>, // conv filter #0 %arg4: tensor<1x1x64x256xf32> // conv filter #1 ) -> tensor<?x256xf32> { // This is a simplified ResNet layer that gets input in NHWC format, converts
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 7.3K bytes - Viewed (0)