- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 28 for Malloc (0.13 sec)
-
src/regexp/testdata/testregex.c
T(" -h list help on standard error\n"); T(" -n do not repeat successful tests with regnexec()\n"); T(" -o ignore match[] overrun errors\n"); T(" -p ignore negative position mismatches\n"); T(" -s use stack instead of malloc\n"); T(" -x do not repeat successful tests with REG_NOSUB\n"); T(" -v list each test line\n"); T(" -A list failed test lines with actual answers\n"); T(" -B list all test lines with actual answers\n");
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Sep 08 04:08:51 UTC 2014 - 51.3K bytes - Viewed (0) -
src/cmd/go/internal/test/test.go
The following flags are also recognized by 'go test' and can be used to profile the tests during execution: -benchmem Print memory allocation statistics for benchmarks. Allocations made in C or using C.malloc are not counted. -blockprofile block.out Write a goroutine blocking profile to the specified file when all tests are complete. Writes test binary as -c would. -blockprofilerate n
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 14:34:32 UTC 2024 - 71.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
return GetInverseScalesConstantOp().getValue(); } func::CallOp GetCallOp() { return call_op_; } FlatSymbolRefAttr GetFunction() { return call_op_.getCalleeAttr(); } private: explicit UniformQuantizeFunctionCallPattern(func::CallOp call_op) : call_op_(call_op) {} func::CallOp call_op_; }; // Matches the following pattern that represents uniform dequantization. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// the allocation is part of a bigger one and it's probably not worth // eagerly collapsing). // // alloc may only run concurrently with find. func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) { sc := s.chunks[ci].load() sc.alloc(npages, s.gen) // TODO(mknyszek): Consider eagerly backing memory with huge pages // here and track whether we believe this chunk is backed by huge pages.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/net/netip/netip_test.go
// Optimizations are required to remove some allocs. t.Skipf("skipping on %v", testenv.Builder()) } allocs := int(testing.AllocsPerRun(1000, func() { sinkString = tc.ip.String() })) if allocs != tc.wantAllocs { t.Errorf("allocs=%d, want %d", allocs, tc.wantAllocs) } }) } } func TestPrefixString(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 17:10:01 UTC 2024 - 54.3K bytes - Viewed (0) -
src/bytes/bytes_test.go
func TestEqual(t *testing.T) { // Run the tests and check for allocation at the same time. allocs := testing.AllocsPerRun(10, func() { for _, tt := range compareTests { eql := Equal(tt.a, tt.b) if eql != (tt.i == 0) { t.Errorf(`Equal(%q, %q) = %v`, tt.a, tt.b, eql) } } }) if allocs > 0 { t.Errorf("Equal allocated %v times", allocs) } } func TestEqualExhaustive(t *testing.T) { var size = 128
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 12:58:37 UTC 2024 - 56.5K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go
if fullCapacityBite > capacity { break } prevAlloc = demand alloc[idx] = demand capacity -= fullCapacityBite } for j := next; j < count; j++ { alloc[indices[j]] = prevAlloc + capacity/float64(count-next) } return alloc } func TestFairAlloc(t *testing.T) { if e, a := []float64{0, 0}, fairAlloc([]float64{0, 0}, 42); !reflect.DeepEqual(e, a) {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Sep 26 12:55:23 UTC 2023 - 58.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
return "main"; } if (subgraph.name.empty()) { return llvm::formatv("fn_{0}", index).str(); } return subgraph.name; } // Adds a CallOp in `region` to call the `func` and returns the results of // CallOp. void AddCallOpInWhileOpRegion(mlir::Region& region, mlir::func::FuncOp func) { OpBuilder op_builder{region}; region.push_back(new mlir::Block()); Location loc = region.getLoc();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
// Go-allocated one, make sure that the Go allocation is aligned to the // pointer size. const psize = int(unsafe.Sizeof(uintptr(0))) alloc := make([]uintptr, (sdLen+psize-1)/psize) dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 52.5K bytes - Viewed (0) -
src/strings/strings_test.go
} } haystack := "test世界" allocs := testing.AllocsPerRun(1000, func() { if i := IndexRune(haystack, 's'); i != 2 { t.Fatalf("'s' at %d; want 2", i) } if i := IndexRune(haystack, '世'); i != 4 { t.Fatalf("'世' at %d; want 4", i) } }) if allocs != 0 && testing.CoverMode() == "" { t.Errorf("expected no allocations, got %f", allocs) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 12:58:37 UTC 2024 - 53K bytes - Viewed (0)