Results 1 - 4 of 4 for sids (0.14 sec)

  1. tensorflow/c/c_api.cc

      const int last_node_id = graph->graph.num_node_ids();
      tensorflow::ImportGraphDefResults results;
      status->status = tensorflow::ImportGraphDef(opts->opts, def, &graph->graph,
                                                  &graph->refiner, &results);
      if (!status->status.ok()) return;
    
      // Add new nodes to name_map
      for (int i = last_node_id; i < graph->graph.num_node_ids(); ++i) {
        auto* node = graph->graph.FindNodeId(i);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
  2. tensorflow/c/eager/gradients.cc

      std::vector<int64_t> source_tensor_ids = MakeTensorIDList(sources);
      tensorflow::gtl::FlatSet<int64_t> sources_set(source_tensor_ids.begin(),
                                                    source_tensor_ids.end());
      std::unordered_map<int64_t, TapeTensor> sources_that_are_targets;
      for (int i = 0; i < target_tensor_ids.size(); ++i) {
        int64_t target_id = target_tensor_ids[i];
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
  3. tensorflow/c/eager/c_api_distributed_test.cc

    // once (i.e., on the main function side) in running distributed functions.
    // This test creates a cluster with two workers, creates a variable on the
    // second worker, and runs a distributed function (VariableAddFunction) whose ops
    // span the local and remote workers. If the graph optimization pass is executed
    // on both the main function side and the component function side, an error will
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 23.5K bytes
    - Viewed (0)
  4. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

    std::unique_ptr<ParallelTensor> ParallelDevice::DeviceIDs(
        TFE_Context* context, TF_Status* status) const {
      std::vector<int32_t> ids;
      ids.reserve(num_underlying_devices());
      for (int i = 0; i < num_underlying_devices(); ++i) {
        ids.push_back(i);
      }
      return ScalarsFromSequence<int32_t>(ids, context, status);
    }
    
    absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
    ParallelDevice::Execute(TFE_Context* context,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
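
The c_api.cc excerpt in result 1 sits behind the public C API entry point TF_GraphImportGraphDef. The sketch below is only an illustration of how a caller reaches that code path; the wrapper name ImportSerializedGraphDef and the serialized-GraphDef buffer passed to it are assumptions, not part of the listed file.

    // Minimal sketch (not from the repository): imports a serialized GraphDef
    // through the public C API entry point that wraps the code in result 1.
    #include <cstdio>
    #include "tensorflow/c/c_api.h"

    void ImportSerializedGraphDef(const void* graph_def_proto, size_t graph_def_len) {
      TF_Graph* graph = TF_NewGraph();
      TF_Status* status = TF_NewStatus();
      TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
      TF_Buffer* def = TF_NewBufferFromString(graph_def_proto, graph_def_len);

      // TF_GraphImportGraphDef funnels into tensorflow::ImportGraphDef, the
      // call shown in the c_api.cc excerpt above.
      TF_GraphImportGraphDef(graph, def, opts, status);
      if (TF_GetCode(status) != TF_OK) {
        std::fprintf(stderr, "Import failed: %s\n", TF_Message(status));
      }

      TF_DeleteBuffer(def);
      TF_DeleteImportGraphDefOptions(opts);
      TF_DeleteStatus(status);
      TF_DeleteGraph(graph);
    }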
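
The gradients.cc excerpt in result 2 collects the source tensor ids into a set and records which gradient targets are themselves sources. The fragment below only illustrates that bookkeeping pattern with plain STL containers and made-up ids; it does not use TensorFlow's tape types.

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    int main() {
      // Hypothetical ids standing in for MakeTensorIDList(sources/targets).
      std::vector<int64_t> source_tensor_ids = {3, 7, 11};
      std::vector<int64_t> target_tensor_ids = {7, 20};

      // Counterpart of sources_set: fast membership test for "is this id a source?".
      std::unordered_set<int64_t> sources_set(source_tensor_ids.begin(),
                                              source_tensor_ids.end());

      // Counterpart of sources_that_are_targets: targets that also appear as sources.
      std::unordered_map<int64_t, bool> sources_that_are_targets;
      for (int64_t target_id : target_tensor_ids) {
        if (sources_set.count(target_id) > 0) {
          sources_that_are_targets[target_id] = true;
        }
      }

      // Prints "7 is both a source and a target".
      for (const auto& entry : sources_that_are_targets) {
        std::printf("%lld is both a source and a target\n",
                    static_cast<long long>(entry.first));
      }
      return 0;
    }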