- Sort Score
- Result 10 results
- Languages All
Results 91 - 100 of 439 for mul (0.03 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
int64_t GetElementIndex(llvm::SmallVectorImpl<int64_t> &shape, llvm::SmallVectorImpl<int64_t> &current_index) { int64_t ind = 0; int64_t mul = 1; for (int i = shape.size() - 1; i >= 0; --i) { ind += (current_index[i] % shape[i]) * mul; mul *= shape[i]; } return ind; } // Helper method that increment index represented in 'current_index_ptr' // in the shape of 'result_shape'.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/generic.rules
(Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x)) // Rewrite x*y ± x*z to x*(y±z) (Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) => (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z)) (Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) => (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z)) // rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 22:21:05 UTC 2024 - 135.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/optimize-arg-operand-constraint.mlir
%1 = arith.constant dense<1.000000e+00> : tensor<f32> %2 = "tf.AddV2"(%arg0, %1) {T = "tfdtype$DT_FLOAT", device = "", name = "StatefulPartitionedCall/add"} : (tensor<1xf32>, tensor<f32>) -> tensor<1xf32> %3 = "tf.Mul"(%2, %0) {T = "tfdtype$DT_FLOAT", device = "", name = "output_node"} : (tensor<1xf32>, tensor<f32>) -> tensor<1xf32> func.return %3 : tensor<1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 24 05:47:26 UTC 2022 - 719 bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize.mlir
// We cannot fuse this tfl.mul into the preceding conv op because %cst2 is not broadcast-compatible to %cst0. %1 = "tfl.mul"(%0, %cst2) {fused_activation_function = "RELU6"} : (tensor<1x4x4x2xf32>, tensor<4x2xf32>) -> tensor<1x4x4x2xf32> func.return %1 : tensor<1x4x4x2xf32> // CHECK: %0 = "tfl.depthwise_conv_2d"(%arg0, %cst, %cst_0) // CHECK: %1 = tfl.mul(%0, %cst_1) // CHECK: return %1 }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 284.1K bytes - Viewed (0) -
tensorflow/cc/tools/freeze_saved_model_test.cc
GraphDef graph_def; Scope scope = Scope::NewRootScope(); Output a = ops::Const(scope.WithOpName("a"), 10.0f, {}); Output b = ops::Const(scope.WithOpName("b"), 10.0f, {}); Output c = ops::Mul(scope.WithOpName("c"), a, b); if (use_resource) { Output var = ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {}); Output read_var = ops::ReadVariableOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 07 13:30:31 UTC 2022 - 21.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/const-fold.mlir
%5 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<f32>) -> tensor<f32> %6 = "tfl.mul"(%0, %3) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<4xf32>) -> tensor<4xf32> %7 = "tfl.mul"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 45.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-serialize-stablehlo-multiply.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 00:55:13 UTC 2023 - 306 bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64.rules
// so it must be used with care. (MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x)) (MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x)) (MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x)) (MUL(Q|L)const [-1] x) => (NEG(Q|L) x) (MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0]) (MUL(Q|L)const [ 1] x) => x (MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x) (MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir
%1 = "tf.BiasAdd"(%0, %bias) {T = "tfdtype$DT_FLOAT", data_format = "NHWC"}: (tensor<256x8x7x16xf32>, tensor<16xf32>) -> tensor<256x8x7x16xf32> %2 = "tf.Mul"(%1, %value) {T = "tfdtype$DT_FLOAT"} : (tensor<256x8x7x16xf32>, tensor<16xf32>) -> tensor<256x8x7x16xf32> func.return %2 : tensor<256x8x7x16xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.golden_summary
Const 357 Conv2D 53 Conv2DBackpropFilter 53 Conv2DBackpropInput 52 DivNoNan 1 Equal 1 FusedBatchNorm 53 FusedBatchNormGrad 53 Identity 2 MatMul 3 MaxPool 1 MaxPoolGrad 1 Mean 1 Mul 164 Pad 1 ReadVariableOp 646 Relu 49 ReluGrad 49 Reshape 2 ResourceApplyKerasMomentum 161 ShapeN 50 Softmax 1 SparseSoftmaxCrossEntropyWithLogits 1 Square 55 Squeeze 1 Sub 106 Sum 57
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 740 bytes - Viewed (0)