- Sort Score
- Results per page: 10
- Languages All
Results 131 - 140 of 214 for add32a (0.11 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/add.mlir
module attributes {tf.versions = {producer = 179 : i32}} { func.func @main(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> { %0 = "tf.AddV2"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> func.return %0 : tensor<f32> } } // CHECK-LABEL: HloModule main // CHECK: ENTRY %main.{{[0-9]+}} ([[ARG0:.*]]: f32[], [[ARG1:.*]]: f32[]) -> (f32[]) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 23 18:56:13 UTC 2022 - 2.7K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_arm64.s
RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 ORR R1, R2, R3 STLXRW R3, (R0), R4 CBNZ R4, load_store_loop MOVD R2, ret+16(FP) RET #endif // func And32(addr *uint32, v uint32) old uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 9K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_ppc64x.s
MOVD ptr+0(FP), R3 MOVW val+8(FP), R4 LWSYNC again: LWAR (R3), R6 OR R4, R6, R7 STWCCC R7, (R3) BNE again MOVW R6, ret+16(FP) RET // func And32(addr *uint32, v uint32) old uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R3 MOVW val+8(FP), R4 LWSYNC again: LWAR (R3),R6 AND R4, R6, R7 STWCCC R7, (R3) BNE again MOVW R6, ret+16(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_riscv64.s
// func Or32(ptr *uint32, val uint32) uint32 TEXT ·Or32(SB), NOSPLIT, $0-20 MOV ptr+0(FP), A0 MOVW val+8(FP), A1 AMOORW A1, (A0), A2 MOVW A2, ret+16(FP) RET // func And32(ptr *uint32, val uint32) uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOV ptr+0(FP), A0 MOVW val+8(FP), A1 AMOANDW A1, (A0), A2 MOVW A2, ret+16(FP) RET // func Or64(ptr *uint64, val uint64) uint64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7K bytes - Viewed (0) -
test/newinline.go
// Test, using compiler diagnostic flags, that inlining is working. // Compiles but does not run. package foo import ( "errors" "runtime" "unsafe" ) func add2(p *byte, n uintptr) *byte { // ERROR "can inline add2" "leaking param: p to result" return (*byte)(add1(unsafe.Pointer(p), n)) // ERROR "inlining call to add1" }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 16 20:15:25 UTC 2023 - 11.2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_mips64x.s
MOVV ptr+0(FP), R1 MOVW val+8(FP), R2 SYNC LL (R1), R3 OR R2, R3, R4 SC R4, (R1) BEQ R4, -3(PC) SYNC MOVW R3, ret+16(FP) RET // func And32(addr *uint32, v uint32) old uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOVV ptr+0(FP), R1 MOVW val+8(FP), R2 SYNC LL (R1), R3 AND R2, R3, R4 SC R4, (R1) BEQ R4, -3(PC) SYNC MOVW R3, ret+16(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
After this pass, the computation would become: ```mlir %resource_handle = "tf.VarHandleOp"() %init_value = "tf.ReadVariableOp"(%resource_handle) %1:2 = "tf_device.cluster"() ( { %new_value = "tf.AddV2"(%init_value, %init_value) tf_device.return %new_value, %new_value }) "tf.AssignVariableOp"(%resource_handle, %1#1) ``` You can see that there are a few main changes applied:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
%float_zp = "tf.Cast"(%zp) {Truncate = false} : (tensor<*xi32>) -> tensor<*xf32> %zp_plus_round_cst = "tf.AddV2"(%float_zp, %round_cst) : (tensor<*xf32>, tensor<f32>) -> tensor<*xf32> %div = "tf.Div"(%input, %scale) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> %add = "tf.AddV2"(%div, %zp_plus_round_cst) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> %round = "tf.Floor"(%add) : (tensor<*xf32>) -> tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
src/net/udpsock_test.go
continue } if err == nil { addr2, err := ResolveUDPAddr(addr.Network(), addr.String()) if !reflect.DeepEqual(addr2, tt.addr) || err != tt.err { t.Errorf("(%q, %q): ResolveUDPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr.Network(), addr.String(), addr2, err, tt.addr, tt.err) } } } } func TestWriteToUDP(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Sep 18 17:20:52 UTC 2023 - 17.2K bytes - Viewed (0) -
src/net/tcpsock_test.go
continue } if err == nil { addr2, err := ResolveTCPAddr(addr.Network(), addr.String()) if !reflect.DeepEqual(addr2, tt.addr) || err != tt.err { t.Errorf("(%q, %q): ResolveTCPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr.Network(), addr.String(), addr2, err, tt.addr, tt.err) } } } } var tcpListenerNameTests = []struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 20 06:04:31 UTC 2024 - 17.7K bytes - Viewed (0)