- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 138 for Malloc (0.12 sec)
-
tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
class SideEffects { enum Type { kAlloc = 0, kFree = 1, kRead = 2, kWrite = 3 }; public: bool IsAlloc() const { return effects_.test(kAlloc); } bool IsFree() const { return effects_.test(kFree); } bool IsRead() const { return effects_.test(kRead); } bool IsWrite() const { return effects_.test(kWrite); } bool IsAllocOnly() const { return IsAlloc() && effects_.count() == 1; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 09:04:13 UTC 2024 - 41.2K bytes - Viewed (0) -
pkg/registry/core/service/portallocator/controller/repair.go
return &Repair{ interval: interval, serviceClient: serviceClient, portRange: portRange, alloc: alloc, leaks: map[int]int{}, broadcaster: eventBroadcaster, recorder: recorder, } } // RunUntil starts the controller until the provided ch is closed. func (c *Repair) RunUntil(onFirstSuccess func(), stopCh chan struct{}) { c.broadcaster.StartRecordingToSink(stopCh)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat Sep 30 15:46:06 UTC 2023 - 10.4K bytes - Viewed (0) -
src/internal/trace/event/go122/event.go
) // Experiments. const ( // AllocFree is the alloc-free events experiment. AllocFree event.Experiment = 1 + iota ) // Experimental events. const ( _ event.Type = 127 + iota // Experimental events for AllocFree. // Experimental heap span events. Added in Go 1.23. EvSpan // heap span exists [timestamp, id, npages, type/class] EvSpanAlloc // heap span alloc [timestamp, id, npages, type/class]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 14.8K bytes - Viewed (0) -
src/runtime/export_test.go
if s.isUnusedUserArenaChunk() { continue } if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 { slow.Mallocs++ slow.Alloc += uint64(s.elemsize) } else { slow.Mallocs += uint64(s.allocCount) slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize) bySize[sizeclass].Mallocs += uint64(s.allocCount) } } // Add in frees by just reading the stats for those directly.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
// the new TF op is an unranked tensor with element type derived. class RewriteTFRCallOp : public OpRewritePattern<CallOp> { using OpRewritePattern<CallOp>::OpRewritePattern; public: explicit RewriteTFRCallOp(MLIRContext* context, const SymbolTable& table, bool materialize_derived_attrs) : OpRewritePattern<CallOp>(context), symbol_table_(table), materialize_derived_attrs_(materialize_derived_attrs) {}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.8K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/controller/repair.go
} } return nil } func (c *Repair) saveSnapShot(rebuilt *ipallocator.Range, alloc rangeallocation.RangeRegistry, snapshot *api.RangeAllocation) error { if err := rebuilt.Snapshot(snapshot); err != nil { return fmt.Errorf("unable to snapshot the updated service IP allocations: %v", err) } if err := alloc.CreateOrUpdate(snapshot); err != nil { if errors.IsConflict(err) { return err }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat Sep 30 15:46:06 UTC 2023 - 13.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// Create a new `CallOp` that calls `callee_func_op`. rewriter.setInsertionPoint(xla_call_module_op); auto call_op = rewriter.create<func::CallOp>(xla_call_module_op.getLoc(), callee_func_op, xla_call_module_op.getArgs()); // Transfer the `kQuantizationMethodAttr` attribute to the `CallOp`,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
pkg/registry/core/service/storage/storage.go
rest.Getter rest.GracefulDeleter } type PodStorage interface { rest.Getter } type REST struct { *genericregistry.Store primaryIPFamily api.IPFamily secondaryIPFamily api.IPFamily alloc Allocators endpoints EndpointsStorage pods PodStorage proxyTransport http.RoundTripper } var ( _ rest.CategoriesProvider = &REST{}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Apr 11 13:09:33 UTC 2024 - 22.3K bytes - Viewed (0) -
src/net/udpsock_test.go
t.Fatal(err) } }) if got := int(allocs); got != 0 { t.Errorf("WriteToUDPAddrPort/ReadFromUDPAddrPort allocated %d objects", got) } allocs = testing.AllocsPerRun(1000, func() { _, err := conn.WriteTo(buf, addr) if err != nil { t.Fatal(err) } _, _, err = conn.ReadFromUDP(buf) if err != nil { t.Fatal(err) } }) if got := int(allocs); got != 1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Sep 18 17:20:52 UTC 2023 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
it++; if (it == block.rend()) return std::nullopt; } // Check if there is a Call before the Yield. func::CallOp call = dyn_cast<func::CallOp>(*it++); if (!call) return std::nullopt; if (allow_to_bool && call.getNumResults() == 1 && yield->getNumOperands() != 1) { // Allow patterns of the form // %cond = call(...) // yield %cond, [...passthrough args...]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.7K bytes - Viewed (0)