Results 131 - 140 of 1,997 for Reserve (0.15 sec)

  1. tensorflow/cc/framework/cc_op_gen_util.cc

      if (slash_pos == path.npos) slash_pos = -1;
      size_t dot_pos = path.rfind('.');
      return string(path.substr(slash_pos + 1, dot_pos - (slash_pos + 1)));
    }
    
    string ToGuard(StringPiece path) {
      string guard;
      guard.reserve(path.size() + 1);  // + 1 -> trailing _
      for (const char c : path) {
        if (c >= 'A' && c <= 'Z') {
          guard += c;
        } else if (c >= 'a' && c <= 'z') {
          guard += c + 'A' - 'a';
        } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Feb 26 00:57:05 UTC 2024
    - 25K bytes
    - Viewed (0)
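
    The excerpt reserves path.size() + 1 characters up front so the guard string never reallocates while each character is appended, with the extra byte covering the trailing '_'. A minimal self-contained sketch of that pattern follows; the name ToGuardSketch and the '_' fallback for non-letter characters are assumptions, since the excerpt is cut off before the else branch.

        #include <string>
        #include <string_view>

        // Sketch only, not the TensorFlow function: build an include-guard style
        // identifier from a path, reserving the final length before appending.
        std::string ToGuardSketch(std::string_view path) {
          std::string guard;
          guard.reserve(path.size() + 1);  // +1 for the trailing '_'
          for (char c : path) {
            if (c >= 'A' && c <= 'Z') {
              guard += c;
            } else if (c >= 'a' && c <= 'z') {
              guard += static_cast<char>(c - 'a' + 'A');  // upper-case the letter
            } else {
              guard += '_';  // assumption: any other character maps to '_'
            }
          }
          guard += '_';  // trailing '_' accounted for by the reserve above
          return guard;
        }
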
  2. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_merge_variables_with_execute.cc

      // result types with the new merged execute result types.
      llvm::SmallVector<Type, 8> output_types;
      const int parallel_execute_num_results = parallel_execute_op->getNumResults();
      output_types.reserve(parallel_execute_num_results);
      Region* execute_region = merged_execute_launch->getParentRegion();
      const int region_index = execute_region->getRegionNumber();
      const int num_results_before_region =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 29 17:52:11 UTC 2024
    - 27K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/build_xla_ops_pass.cc

                                      const XlaClusterInfo& cluster_info,
                                      const DebuggingOpts& debugging_opts) {
      std::vector<Output> xla_run_args;
      xla_run_args.reserve(cluster_info.non_constant_inputs.size() +
                           cluster_info.resource_inputs.size());
      int input_idx = 0;
      for (const Output& o : cluster_info.non_constant_inputs) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
    - Viewed (0)
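
    The reserve call above sizes xla_run_args for the combined count of the cluster's non-constant and resource inputs before both lists are appended. A generic sketch of that idiom with standard containers follows; the Arg struct and the function name CollectRunArgs are placeholders, not the XLA types.

        #include <string>
        #include <vector>

        struct Arg { std::string name; };  // placeholder for the real Output type

        // Sketch of the sizing idiom above: reserve the combined count of both
        // input lists once, then append each list without further reallocation.
        std::vector<Arg> CollectRunArgs(const std::vector<Arg>& non_constant_inputs,
                                        const std::vector<Arg>& resource_inputs) {
          std::vector<Arg> run_args;
          run_args.reserve(non_constant_inputs.size() + resource_inputs.size());
          run_args.insert(run_args.end(), non_constant_inputs.begin(),
                          non_constant_inputs.end());
          run_args.insert(run_args.end(), resource_inputs.begin(),
                          resource_inputs.end());
          return run_args;
        }
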
  4. src/runtime/race_ppc64le.s

    	MOVD	0(R10), R11
    	MOVD	g_m(R11), R3
    	MOVD	m_p(R3), R3
    	MOVD	p_raceprocctx(R3), R3
    	MOVD	R3, (R4)
    	RET
    
    rest:
    	// Save registers according to the host PPC64 ABI
    	// and reserve 16B for argument storage.
    	STACK_AND_SAVE_HOST_TO_GO_ABI(16)
    
    	// Load g, and switch to g0 if not already on it.
    	MOVD	runtime·tls_g(SB), R10
    	MOVD	0(R10), g
    
    	MOVD	g_m(g), R7
    	MOVD	m_g0(R7), R8
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 17K bytes
    - Viewed (0)
  5. src/runtime/sys_openbsd_amd64.s

    	// Transition from C ABI to Go ABI.
    	PUSH_REGS_HOST_TO_ABI0()
    
    	// Set up ABIInternal environment: g in R14, cleared X15.
    	get_tls(R12)
    	MOVQ	g(R12), R14
    	PXOR	X15, X15
    
    	// Reserve space for spill slots.
    	NOP	SP		// disable vet stack checking
    	ADJSP   $24
    
    	// Call into the Go signal handler
    	MOVQ	DI, AX	// sig
    	MOVQ	SI, BX	// info
    	MOVQ	DX, CX	// ctx
    	CALL	·sigtrampgo<ABIInternal>(SB)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 06 18:49:01 UTC 2023
    - 15.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/merge_control_flow.cc

        auto first_if_ops = GetAllOpsFromIf(*it);
        middle_if_ops.insert(first_if_ops.begin(), first_if_ops.end());
      }
    
      llvm::SmallVector<Operation*, 8> moved_ops_ordered;
      moved_ops_ordered.reserve(all_moved_ops.size());
      for (Operation& op : *last_if_op->getBlock()) {
        if (all_moved_ops.count(&op)) {
          moved_ops_ordered.push_back(&op);
        }
      }
    
      return moved_ops_ordered;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 17 07:31:01 UTC 2023
    - 25.9K bytes
    - Viewed (0)
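
    The excerpt reserves capacity for all moved ops and then walks the block that contains them, pushing only the ops found in all_moved_ops, which yields the moved ops in their original block order. A rough standard-library sketch of that idiom follows; Op, OrderBySourceBlock, and the container types are stand-ins, not the MLIR classes.

        #include <unordered_set>
        #include <vector>

        struct Op { int id; };  // placeholder for mlir::Operation

        // Sketch of the ordering idiom above: reserve for the number of selected
        // ops, then walk the block in its existing order and keep only the
        // selected ones, so the result follows block order rather than the
        // set's iteration order.
        std::vector<Op*> OrderBySourceBlock(const std::vector<Op*>& block_ops,
                                            const std::unordered_set<Op*>& selected) {
          std::vector<Op*> ordered;
          ordered.reserve(selected.size());
          for (Op* op : block_ops) {
            if (selected.count(op) > 0) {
              ordered.push_back(op);
            }
          }
          return ordered;
        }
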
  7. pkg/util/iptree/iptree.go

    }
    
    // prefixContainIP returns true if the given IP is contained with the prefix,
    // is not the network address and also, if IPv4, is not the broadcast address.
    // This is required because the Kubernetes allocators reserve these addresses
    // so IPAddresses can not block deletion of this ranges.
    func prefixContainIP(prefix netip.Prefix, ip netip.Addr) bool {
    	// if the IP is the network address is not contained
    	if prefix.Masked().Addr() == ip {
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Oct 31 21:05:04 UTC 2023
    - 17.7K bytes
    - Viewed (0)
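
    The doc comment above describes a containment check that also rejects the network address and, for IPv4, the broadcast address, because the Kubernetes allocators reserve those addresses. A hedged, IPv4-only sketch of the same check follows, using host-order 32-bit integers instead of net/netip; the name PrefixContainsUsableIPv4 and the integer representation are assumptions, not the Kubernetes code.

        #include <cstdint>

        // Sketch of the check described above, IPv4 only, addresses as host-order
        // 32-bit integers: the IP must fall inside the prefix but must not be the
        // network address or, for prefixes shorter than /31, the broadcast address.
        bool PrefixContainsUsableIPv4(uint32_t network, int prefix_len, uint32_t ip) {
          const uint32_t mask =
              prefix_len == 0 ? 0u : ~uint32_t{0} << (32 - prefix_len);
          if ((ip & mask) != (network & mask)) return false;  // outside the prefix
          const uint32_t network_addr = network & mask;
          if (ip == network_addr) return false;               // network address
          if (prefix_len < 31) {
            const uint32_t broadcast = network_addr | ~mask;
            if (ip == broadcast) return false;                // broadcast address
          }
          return true;
        }
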
  8. pkg/kubelet/cm/cpumanager/cpu_assignment_test.go

    		},
    		{
    			"take a socket and a NUMA node of cpus from dual socket with multi-numa-per-socket with HT, a core taken",
    			topoDualSocketMultiNumaPerSocketHT,
    			mustParseCPUSet(t, "1-39,41-79"), // reserve the first (phys) core (0,40)
    			60,
    			"",
    			mustParseCPUSet(t, "10-39,50-79"),
    		},
    	}
    }
    
    func TestTakeByTopologyNUMAPacked(t *testing.T) {
    	testCases := commonTakeByTopologyTestCases(t)
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Oct 09 17:31:37 UTC 2023
    - 22.7K bytes
    - Viewed (0)
  9. src/runtime/debuglog.go

    	// bytes of delta in the record header.
    	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
    	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
    		l.w.writeSync(tick, nano)
    	}
    
    	// Reserve space for framing header.
    	l.w.ensure(debugLogHeaderSize)
    	l.w.write += debugLogHeaderSize
    
    	// Write record header.
    	l.w.uvarint(tick - l.w.tick)
    	l.w.uvarint(nano - l.w.nano)
    	gp := getg()
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 02 15:10:48 UTC 2024
    - 18.3K bytes
    - Viewed (0)
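
    The excerpt skips debugLogHeaderSize bytes before emitting the varint-encoded tick and nano deltas, leaving room for a framing header that is filled in later. A generic, self-contained sketch of that skip-then-backfill idiom follows; the 2-byte host-order length prefix and the names PutUvarint and AppendRecord are assumptions, not the Go runtime's record format.

        #include <cstddef>
        #include <cstdint>
        #include <cstring>
        #include <vector>

        constexpr size_t kHeaderSize = 2;  // assumed 16-bit length prefix

        // Append an unsigned value in LEB128-style varint form.
        void PutUvarint(std::vector<uint8_t>& buf, uint64_t v) {
          while (v >= 0x80) {
            buf.push_back(static_cast<uint8_t>(v) | 0x80);
            v >>= 7;
          }
          buf.push_back(static_cast<uint8_t>(v));
        }

        // Reserve space for the framing header, write the record body, then go
        // back and fill the header with the body length (host byte order here).
        void AppendRecord(std::vector<uint8_t>& buf, uint64_t tick_delta,
                          uint64_t nano_delta) {
          const size_t header_pos = buf.size();
          buf.resize(buf.size() + kHeaderSize);  // skip past the header bytes
          PutUvarint(buf, tick_delta);           // write record header fields
          PutUvarint(buf, nano_delta);
          const uint16_t body_len =
              static_cast<uint16_t>(buf.size() - header_pos - kHeaderSize);
          std::memcpy(&buf[header_pos], &body_len, sizeof(body_len));  // backfill
        }
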
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

        return nullptr;
      }
    
      auto value_shape_type = mlir::RankedTensorType::get(
          {pad_dimensions}, builder->getIntegerType(32));
    
      SmallVector<int32_t, 4> value_i32;
      value_i32.reserve(pad_dimensions);
      for (int i = 0; i < pad_dimensions - attr.getNumElements(); ++i) {
        value_i32.push_back(value_to_pad);
      }
      for (const auto& size : attr) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
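
    The excerpt reserves pad_dimensions entries, pushes value_to_pad for the leading positions, and then copies the attribute's existing values. A plain-vector sketch of that padding idiom follows; the name PadLeading and the use of std::vector in place of the MLIR attribute are assumptions.

        #include <cstdint>
        #include <vector>

        // Sketch of the padding idiom above: reserve the target length, push the
        // fill value for the missing leading entries, then append the existing
        // values so the result holds exactly pad_dimensions elements.
        std::vector<int32_t> PadLeading(const std::vector<int32_t>& values,
                                        int pad_dimensions, int32_t value_to_pad) {
          std::vector<int32_t> padded;
          padded.reserve(pad_dimensions);
          for (int i = 0; i < pad_dimensions - static_cast<int>(values.size()); ++i) {
            padded.push_back(value_to_pad);
          }
          padded.insert(padded.end(), values.begin(), values.end());
          return padded;
        }
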