Results 1 - 6 of 6 for add32a (0.11 sec)
tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir
// CHECK: [[ACCUM_NEW:%.*]] = "tf.AddV2"([[ACCUM_MOMENTUM]], [[GRAD]])
// CHECK: "tf.AssignVariableOp"([[ACCUM_HANDLE]], [[ACCUM_NEW]])
// CHECK: [[GRAD_LR:%.*]] = "tf.Mul"([[GRAD]], [[LR]])
// CHECK: [[MOMENTUM_LR:%.*]] = "tf.Mul"([[MOMENTUM]], [[LR]])
// CHECK: [[ACCUM_NEW_MOMENTUM_LR:%.*]] = "tf.Mul"([[ACCUM_NEW]], [[MOMENTUM_LR]])
// CHECK: [[DELTA:%.*]] = "tf.AddV2"([[GRAD_LR]], [[ACCUM_NEW_MOMENTUM_LR]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 19:47:48 UTC 2024 - 51.3K bytes - Viewed (0)
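The captured names trace a momentum-style optimizer update decomposed into primitive multiplies and adds. A scalar Go sketch of exactly the dataflow these CHECK lines pin down (the real pass rewrites tensor ops; ACCUM_MOMENTUM is assumed to be accum * momentum computed earlier in the test, and the function name is illustrative):

    func decomposedUpdate(accumMomentum, grad, lr, momentum float32) (accumNew, delta float32) {
        accumNew = accumMomentum + grad             // tf.AddV2 -> ACCUM_NEW, then stored via tf.AssignVariableOp
        gradLR := grad * lr                         // tf.Mul -> GRAD_LR
        momentumLR := momentum * lr                 // tf.Mul -> MOMENTUM_LR
        accumNewMomentumLR := accumNew * momentumLR // tf.Mul -> ACCUM_NEW_MOMENTUM_LR
        delta = gradLR + accumNewMomentumLR         // tf.AddV2 -> DELTA
        return accumNew, delta
    }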
tensorflow/compiler/mlir/tfrt/tests/mlrt/while_to_map_fn.mlir
// CHECK-NEXT: [[loop_counter:%.*]] = "tf.AddV2"(%arg2, [[cst_1]])
// CHECK-NEXT: [[weight:%.*]] = "tf.ReadVariableOp"(%arg5)
// CHECK-NEXT: [[mpy:%.*]] = "tf.MatMul"(%arg6, [[weight]])
// CHECK-NEXT: [[element_index:%.*]] = "tf.AddV2"(%arg3, [[cst_1]])
// CHECK-NEXT: [[bias:%.*]] = "tf.GatherV2"(%arg7, %arg3, [[cst_0]])
// CHECK-NEXT: [[res:%.*]] = "tf.AddV2"([[mpy]], [[bias]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:40:22 UTC 2024 - 68.6K bytes - Viewed (0)
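The CHECK-NEXT lines describe one iteration of a while body: both counters step by a constant while the body computes MatMul plus a gathered bias for the current element only. That per-element independence is what lets the pass rewrite the loop as a map. A hedged Go sketch of that shape (f stands in for the matmul-plus-bias body; the actual pass operates on tf ops):

    // While form: a counter-driven loop whose body touches only element i.
    // Because iterations are independent, the loop can be rewritten as a
    // map over the elements, which mlrt can then run as map_fn.
    func whileAsMap(xs []float32, f func(float32) float32) []float32 {
        out := make([]float32, len(xs))
        for i := 0; i < len(xs); i++ { // loop_counter and element_index both step by cst_1
            out[i] = f(xs[i])          // MatMul + gathered bias in the real body
        }
        return out
    }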
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
//
// is lowered to:
//
//   %sum0 = "tf.AddV2"(%0, %1)
//   %result = "tf.AddV2"(%sum0, %2)
//
// While
//
//   %result = "tf.AddN"(%0, %1, %2, %3, %4)
//
// is lowered to:
//
//   %sum0 = "tf.AddV2"(%0, %1)
//   %sum1 = "tf.AddV2"(%2, %3)
//   %sum2 = "tf.AddV2"(%sum0, %sum1)
//   %result = "tf.AddV2"(%sum2, %4)
//
class LowerAddNOp : public RewritePattern {
 public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0)
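The comment documents the lowering strategy: an n-ary tf.AddN becomes a balanced tree of binary tf.AddV2 ops, halving the operand list each round, giving O(log n) depth instead of a chain of n-1 adds. A minimal Go sketch of that pairwise reduction (scalars standing in for tensors):

    func addN(operands []float32) float32 {
        for len(operands) > 1 {
            var next []float32
            for i := 0; i+1 < len(operands); i += 2 {
                next = append(next, operands[i]+operands[i+1]) // one tf.AddV2 per pair
            }
            if len(operands)%2 == 1 { // odd operand out carries into the next round
                next = append(next, operands[len(operands)-1])
            }
            operands = next
        }
        return operands[0]
    }

With five operands this produces exactly the %sum0, %sum1, %sum2, %result sequence shown in the comment.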
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
%0 = "tf.AddV2"(%arg0, %arg1) : (tensor<ui32>, tensor<ui32>) -> tensor<ui32> func.return %0 : tensor<ui32> // CHECK-LABEL: add_v2_uint32 // CHECK: %[[CAST:.*]] = "tf.Cast"(%arg0) <{Truncate = false}> : (tensor<ui32>) -> tensor<i32> // CHECK: %[[CAST1:.*]] = "tf.Cast"(%arg1) <{Truncate = false}> : (tensor<ui32>) -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0) -
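The test expects the unsigned add to be rewritten as cast-to-i32, add, cast-back, presumably because the converter's target lacks a ui32 add kernel. The rewrite is sound because two's-complement addition produces the same bit pattern for signed and unsigned operands of the same width. A quick Go check of that equivalence (the function name is illustrative):

    func addU32ViaI32(a, b uint32) uint32 {
        ai, bi := int32(a), int32(b) // the two tf.Cast ops: ui32 -> i32
        return uint32(ai + bi)       // i32 tf.AddV2, then cast back to ui32
    }

For any a and b this returns the same value as a + b, e.g. addU32ViaI32(^uint32(0), 1) == 0.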
src/cmd/compile/internal/ppc64/ssa.go
ssa.OpPPC64LoweredAtomicAdd64:
    // LWSYNC
    // LDAR/LWAR (Rarg0), Rout
    // ADD Rarg1, Rout
    // STDCCC/STWCCC Rout, (Rarg0)
    // BNE -3(PC)
    // MOVW Rout, Rout (if Add32)
    ld := ppc64.ALDAR
    st := ppc64.ASTDCCC
    if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
        ld = ppc64.ALWAR
        st = ppc64.ASTWCCC
    }
    r0 := v.Args[0].Reg()
    r1 := v.Args[1].Reg()
    out := v.Reg0()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 19:59:38 UTC 2024 - 55.4K bytes - Viewed (0)
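The generated sequence is a classic load-reserved/store-conditional retry loop: LWSYNC fences, LDAR/LWAR takes a reservation, STDCCC/STWCCC attempts the store, BNE -3(PC) retries on failure, and the final MOVW zero-extends for the 32-bit case. From Go source you reach this through sync/atomic; a portable CAS-loop sketch of the same retry structure:

    import "sync/atomic"

    // Equivalent in effect to atomic.AddUint32(addr, delta), which the
    // compiler intrinsifies to LoweredAtomicAdd32 on ppc64.
    func atomicAdd32(addr *uint32, delta uint32) uint32 {
        for {
            old := atomic.LoadUint32(addr) // like LWAR: observe the current value
            if atomic.CompareAndSwapUint32(addr, old, old+delta) { // like STWCCC
                return old + delta
            }
            // CAS failed: another writer intervened; retry, like BNE -3(PC)
        }
    }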
src/cmd/compile/internal/ssa/_gen/PPC64.rules
// GOPPC64 values indicate power8, power9, etc.
// That means the code is compiled for that target,
// and will not run on earlier targets.
//
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add64F ...) => (FADD ...)
(Add32F ...) => (FADDS ...)
(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub32F ...) => (FSUBS ...)
(Sub64F ...) => (FSUB ...)
(Min(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMINJDP x y)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0)
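Each rule maps a generic SSA op onto a PPC64 machine op, e.g. (Add32F ...) => (FADDS ...); the buildcfg.GOPPC64 guard admits a rule only when the minimum target level (power9 here) has the instruction. A tiny illustration of what one rule covers:

    // The float32 add below appears in generic SSA as Add32F; per the rule
    // (Add32F ...) => (FADDS ...), the ppc64 backend selects FADDS for it
    // (inspect with GOARCH=ppc64le go build -gcflags=-S).
    func addFloat32(a, b float32) float32 {
        return a + b
    }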