Results 41 - 50 of 638 for zeroing (0.23 sec)

  1. src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go

    			reg: regInfo{
    				inputs:   []regMask{buildReg("R2"), buildReg("R1")},
    				clobbers: buildReg("R1 R2 R31"),
    			},
    			faultOnNilArg0: true,
    			faultOnNilArg1: true,
    		},
    
    		// large or unaligned zeroing
    		// arg0 = address of memory to zero (in R1, changed as side effect)
    		// arg1 = address of the last element to zero
    		// arg2 = mem
    		// auxint = alignment
    		// returns mem
    		//	SUBV	$8, R1
    - Last Modified: Wed May 24 03:36:31 UTC 2023
    - 25.5K bytes
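    Note: a rough Go-level rendering of the loop these comments describe (a hypothetical illustration; the real op emits MIPS64 assembly). arg0 is the write pointer, advanced as a side effect; arg1 is the inclusive last-element address. The MIPS and RISC-V variants in the results below follow the same shape.

    package sketch

    import "unsafe"

    // zeroWords stores one zero 8-byte word per iteration, bumping the
    // write pointer until it has covered the last-element address.
    func zeroWords(p, last unsafe.Pointer) {
    	for uintptr(p) <= uintptr(last) {
    		*(*uint64)(p) = 0    // one 8-byte zero store
    		p = unsafe.Add(p, 8) // step by the word size
    	}
    }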
  2. src/cmd/compile/internal/ssa/_gen/MIPSOps.go

    		{name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
    
    		// large or unaligned zeroing
    		// arg0 = address of memory to zero (in R1, changed as side effect)
    		// arg1 = address of the last element to zero
    		// arg2 = mem
    		// auxint = alignment
    		// returns mem
    		//	SUBU	$4, R1
    - Last Modified: Wed May 24 14:43:03 UTC 2023
    - 24K bytes
  3. test/prove.go

    	b := make([]int, 5)
    	c := make([]int, 5)
    	for i := -1; i <= 0; i-- {
    		b[i] = i
    		n++
    		if n > 10 {
    			break
    		}
    	}
    	useSlice(a)
    	useSlice(c)
    }
    
    // Check that prove is zeroing these right shifts of positive ints by bit-width - 1.
    // e.g (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) && ft.isNonNegative(n) -> 0
    func sh64(n int64) int64 {
    	if n < 0 {
    		return n
    	}
    - Last Modified: Tue Jan 23 00:02:36 UTC 2024
    - 21.2K bytes
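    Note: the identity this test relies on is that an arithmetic right shift by bit-width minus 1 broadcasts the sign bit, so once prove knows n is non-negative, n >> 63 must be 0. A standalone demonstration (my own snippet, not part of prove.go):

    package main

    import "fmt"

    func main() {
    	for _, n := range []int64{0, 1, 42, 1 << 62} {
    		// n >> 63 copies the sign bit into every bit position;
    		// a non-negative int64 has sign bit 0, so this is 0.
    		fmt.Println(n >> 63) // prints 0 each time
    	}
    }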
  4. src/runtime/stack_test.go

    }
    
    func useStackPtrs(n int, b bool) {
    	if b {
    		// This code contributes to the stack frame size, and hence to the
    		// stack copying cost. But since b is always false, it costs no
    		// execution time (not even the zeroing of a).
    		var a [128]*int // 1KB of pointers
    		a[n] = &n
    		n = *a[0]
    	}
    	if n == 0 {
    		return
    	}
    	useStackPtrs(n-1, b)
    }
    
    type structWithMethod struct{}
    
    - Last Modified: Wed Jun 14 00:03:57 UTC 2023
    - 23.1K bytes
  5. src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go

    			},
    			typ:            "Mem",
    			faultOnNilArg0: true,
    			faultOnNilArg1: true,
    		},
    
    		// Generic moves and zeros
    
    		// general unaligned zeroing
    		// arg0 = address of memory to zero (in X5, changed as side effect)
    		// arg1 = address of the last element to zero (inclusive)
    		// arg2 = mem
    		// auxint = element size
    		// returns mem
    		//	mov	ZERO, (X5)
    - Last Modified: Thu Mar 07 14:57:07 UTC 2024
    - 30.7K bytes
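    Note: a slice-based sketch of the stepping described above (illustration only; the real op emits RISC-V instructions against X5), with auxint supplying the element size and the bound inclusive:

    // zeroElems zeroes buf one element at a time, advancing by the
    // element size until no full element remains.
    func zeroElems(buf []byte, elemSize int) {
    	for p := 0; p+elemSize <= len(buf); p += elemSize {
    		for i := 0; i < elemSize; i++ {
    			buf[p+i] = 0
    		}
    	}
    }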
  6. src/cmd/compile/internal/ssa/_gen/ARM64.rules

    // strip off fractional word zeroing
    (Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
    	(Zero [8]
    		(OffPtr <ptr.Type> ptr [s-8])
    		(Zero [s-s%16] ptr mem))
    (Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
    	(Zero [16]
    		(OffPtr <ptr.Type> ptr [s-16])
    		(Zero [s-s%16] ptr mem))
    
    // medium zeroing uses a duff device
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 113.1K bytes
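    Note: a worked instance of the first rule (my own example): for s = 24, s%16 = 8 and s > 16, so Zero [24] splits into an inner Zero [16] of the prefix plus a Zero [8] at OffPtr [s-8] = [16]. The resulting store pattern, sketched at the Go level:

    // zero24 mirrors the ARM64 decomposition of a 24-byte zeroing:
    // the 16-byte prefix first, then the trailing 8 bytes at offset 16.
    func zero24(b *[24]byte) {
    	for i := 0; i < 16; i++ { // inner Zero [s-s%16] ptr mem
    		b[i] = 0
    	}
    	for i := 16; i < 24; i++ { // outer Zero [8] (OffPtr ptr [16])
    		b[i] = 0
    	}
    }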
  7. src/runtime/mgcsweep.go

    			unlock(&mheap_.lock)
    		})
    		return false
    	}
    
    	if spc.sizeclass() != 0 {
    		// Handle spans for small objects.
    		if nfreed > 0 {
    			// Only mark the span as needing zeroing if we've freed any
    			// objects, because a fresh span that had been allocated into,
    			// wasn't totally filled, but then swept, still has all of its
    			// free slots zeroed.
    			s.needzero = 1
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
  8. test/codegen/memcombine.go

    	// ppc64le:`MOVW\s`
    	// ppc64:`MOVWBR`
    	b[(idx<<2)+3], b[(idx<<2)+2], b[(idx<<2)+1], b[(idx<<2)+0] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val)
    }
    
    // ------------- //
    //    Zeroing    //
    // ------------- //
    
    // Check that zero stores are combined into larger stores
    
    func zero_byte_2(b1, b2 []byte) {
    	// bounds checks to guarantee safety of writes below
    	_, _ = b1[1], b2[1]
    - Last Modified: Thu Mar 21 19:45:41 UTC 2024
    - 29.7K bytes
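    Note: the pattern under test in minimal standalone form (a hypothetical sketch, not the full test function): one bounds check dominating both writes lets the compiler prove safety and merge two adjacent 1-byte zero stores into a single 2-byte store.

    // zeroTwoBytes zeroes two adjacent bytes; after the explicit bounds
    // check, the two byte stores are candidates for combining.
    func zeroTwoBytes(b []byte) {
    	_ = b[1] // bounds check covering both writes below
    	b[0] = 0
    	b[1] = 0
    }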
  9. src/cmd/compile/internal/ssa/_gen/AMD64.rules

    			(MOVOstoreconst [makeValAndOff(0,16)] destptr
    				(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
    
    // Medium zeroing uses a duff device.
    (Zero [s] destptr mem)
    	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
    	(DUFFZERO [s] destptr mem)
    
    // Large zeroing uses REP STOSQ.
    (Zero [s] destptr mem)
    	&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
    	&& s%8 == 0 =>
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 93.9K bytes
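    Note: reading the two rules' thresholds together (assuming useSSE and Duff's device enabled), the selection works out as below; the cutoffs come straight from the conditions above.

    // zeroStrategy summarizes which lowering the rules select for a
    // zeroing of s bytes under the default config.
    func zeroStrategy(s int64) string {
    	switch {
    	case s > 1024 && s%8 == 0:
    		return "REP STOSQ" // large zeroing
    	case s > 64 && s <= 1024 && s%16 == 0:
    		return "DUFFZERO" // medium zeroing, Duff's device
    	default:
    		return "unrolled stores" // handled by earlier rules
    	}
    }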
  10. src/cmd/compile/internal/ssa/_gen/ARMOps.go

    				clobbers: buildReg("R0 R1 R2 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
    			},
    			faultOnNilArg0: true,
    			faultOnNilArg1: true,
    		},
    
    		// large or unaligned zeroing
    		// arg0 = address of memory to zero (in R1, changed as side effect)
    		// arg1 = address of the last element to zero
    		// arg2 = value to store (always zero)
    		// arg3 = mem
    		// returns mem
    - Last Modified: Fri Feb 24 00:21:13 UTC 2023
    - 41K bytes
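    Note: unlike MIPS (R0) and RISC-V (ZERO), 32-bit ARM has no hard-wired zero register, which is presumably why the zero to store is threaded through as an explicit argument (arg2) here. A Go-level sketch of that shape (illustration only):

    // zeroRangeARM takes the zero value as a parameter, mirroring arg2
    // of the ARM op; MIPS/RISC-V read a hard-wired zero register instead.
    func zeroRangeARM(buf []uint32, zero uint32) {
    	for i := range buf {
    		buf[i] = zero // always 0, held in a register
    	}
    }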