Results 1 - 10 of 33 for createEl (0.25 sec)

  1. tensorflow/c/eager/parallel_device/parallel_device_remote_test.cc

      ASSERT_TRUE(tensorflow::GrpcServer::Create(
                      server_def, tensorflow::Env::Default(), &worker_server1)
                      .ok());
      ASSERT_TRUE(worker_server1->Start().ok());
    
      server_def.set_task_index(2);
      std::unique_ptr<tensorflow::GrpcServer> worker_server2;
      ASSERT_TRUE(tensorflow::GrpcServer::Create(
                      server_def, tensorflow::Env::Default(), &worker_server2)
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 22:09:57 GMT 2023
    - 6.7K bytes
    - Viewed (0)
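
    The snippet above brings up an in-process gRPC worker from a tensorflow::ServerDef and bumps
    set_task_index before creating the next one. As a hedged sketch (not part of the indexed test),
    the ServerDef it consumes might be assembled like this; the job name, ports, and include paths
    are assumptions:

    #include <memory>

    #include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"  // assumed header for GrpcServer
    #include "tensorflow/core/platform/env.h"
    #include "tensorflow/core/protobuf/cluster.pb.h"
    #include "tensorflow/core/protobuf/tensorflow_server.pb.h"

    // Describes a hypothetical three-task "worker" job; addresses are placeholders.
    tensorflow::ServerDef MakeWorkerServerDef(int task_index) {
      tensorflow::ServerDef server_def;
      server_def.set_protocol("grpc");
      server_def.set_job_name("worker");
      server_def.set_task_index(task_index);
      tensorflow::JobDef* job = server_def.mutable_cluster()->add_job();
      job->set_name("worker");
      (*job->mutable_tasks())[0] = "localhost:2222";
      (*job->mutable_tasks())[1] = "localhost:2223";
      (*job->mutable_tasks())[2] = "localhost:2224";
      return server_def;
    }

    // Creates and starts one task, mirroring the Create/Start pattern in the snippet.
    std::unique_ptr<tensorflow::GrpcServer> StartWorker(int task_index) {
      std::unique_ptr<tensorflow::GrpcServer> server;
      const tensorflow::ServerDef server_def = MakeWorkerServerDef(task_index);
      if (!tensorflow::GrpcServer::Create(server_def, tensorflow::Env::Default(),
                                          &server)
               .ok() ||
          !server->Start().ok()) {
        return nullptr;
      }
      return server;
    }
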
  2. tensorflow/c/c_api.cc

              "and will trigger an error in the future. Either don't modify "
              "nodes after running them or create a new session.");
        }
      }
    }
    
    namespace {
    
    // Helper method that creates a shape handle for a shape described by dims.
    tensorflow::shape_inference::ShapeHandle ShapeHandleFromDims(
        tensorflow::shape_inference::InferenceContext* ic, int num_dims,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
  3. tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem_helper.cc

      // When creating the file, use the same permissions as the original
      mode_t open_mode = mode & (S_IRWXU | S_IRWXG | S_IRWXO);
    
      // O_WRONLY | O_CREAT | O_TRUNC:
      //   Open file for write and if file does not exist, create the file.
      //   If file exists, truncate its size to 0.
      int dst_fd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, open_mode);
      if (dst_fd < 0) {
        close(src_fd);
        return -1;
      }
    
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Jan 16 05:36:52 GMT 2020
    - 2.1K bytes
    - Viewed (1)
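
    The helper above copies a file, reusing the source file's permission bits (masked down to the
    rwx bits) when it opens the destination. A minimal standalone sketch of the same POSIX pattern
    (illustrative, not taken from the plugin):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Copies `src` to `dst`, giving `dst` the same user/group/other permission
    // bits as `src`. Returns 0 on success, -1 on failure.
    int CopyWithSamePermissions(const char* src, const char* dst) {
      int src_fd = open(src, O_RDONLY);
      if (src_fd < 0) return -1;

      struct stat st;
      if (fstat(src_fd, &st) < 0) {
        close(src_fd);
        return -1;
      }
      // Keep only the permission bits, as in the snippet above.
      mode_t open_mode = st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO);

      // O_WRONLY | O_CREAT | O_TRUNC: create dst if missing, else truncate it to 0.
      int dst_fd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, open_mode);
      if (dst_fd < 0) {
        close(src_fd);
        return -1;
      }

      char buf[4096];
      ssize_t n;
      while ((n = read(src_fd, buf, sizeof(buf))) > 0) {
        if (write(dst_fd, buf, n) != n) {
          n = -1;
          break;
        }
      }
      close(src_fd);
      close(dst_fd);
      return n < 0 ? -1 : 0;
    }
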
  4. tensorflow/c/eager/c_api_unified_experimental.cc

    using tensorflow::tracing::TracingTensorHandle;
    
    void TF_SetTracingImplementation(const char* name, TF_Status* s) {
      tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
    }
    
    // Creates a new TensorFlow function; it is an execution context attached to
    // a given tracing context.
    TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) {
      return wrap(CreateTracingExecutionContext(fn_name, s));
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 9K bytes
    - Viewed (0)
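
    A hedged sketch of how these two entry points might be used together; the engine name
    "graphdef" and the TF_DeleteExecutionContext cleanup call are assumptions about the unified
    C API and are not shown in the snippet:

    #include "tensorflow/c/eager/c_api_unified_experimental.h"
    #include "tensorflow/c/tf_status.h"

    // Selects a tracing engine, then opens a tracing context for a new function.
    void TraceEmptyFunction() {
      TF_Status* s = TF_NewStatus();
      // "graphdef" is assumed to be a registered tracing-engine name.
      TF_SetTracingImplementation("graphdef", s);
      if (TF_GetCode(s) != TF_OK) {
        TF_DeleteStatus(s);
        return;
      }

      // Execution context attached to the tracing context for "my_fn".
      TF_ExecutionContext* ctx = TF_CreateFunction("my_fn", s);
      if (TF_GetCode(s) == TF_OK) {
        // ... add parameters and ops here before finalizing the function ...
        TF_DeleteExecutionContext(ctx);  // assumed cleanup entry point
      }
      TF_DeleteStatus(s);
    }
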
  5. tensorflow/c/eager/parallel_device/parallel_device_testlib.cc

    }
    
    // Passed to `TF_NewTensor` to indicate how an array of floats should be
    // deleted.
    static void FloatDeallocator(void* data, size_t, void* arg) {
      delete[] static_cast<float*>(data);
    }
    
    // Creates a TFE_TensorHandle with value `v`.
    TensorHandlePtr FloatTensorHandle(float v, TF_Status* status) {
      const int num_bytes = sizeof(float);
      float* values = new float[1];
      values[0] = v;
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Jun 15 15:44:44 GMT 2021
    - 12.5K bytes
    - Viewed (0)
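
    The testlib helper above heap-allocates a single float and hands ownership to the runtime
    through the deallocator passed to TF_NewTensor. A rough standalone sketch of that pattern
    (names here are illustrative, not the testlib's):

    #include "tensorflow/c/c_api.h"
    #include "tensorflow/c/eager/c_api.h"

    // Called by the runtime once the tensor no longer needs the buffer.
    static void FreeFloatBuffer(void* data, size_t, void*) {
      delete[] static_cast<float*>(data);
    }

    // Wraps a single float in a scalar TF_Tensor, then in a TFE_TensorHandle.
    TFE_TensorHandle* MakeScalarFloatHandle(float v, TF_Status* status) {
      float* values = new float[1];
      values[0] = v;
      // Scalar: no dims, sizeof(float) bytes, released via FreeFloatBuffer.
      TF_Tensor* t = TF_NewTensor(TF_FLOAT, /*dims=*/nullptr, /*num_dims=*/0,
                                  values, sizeof(float), &FreeFloatBuffer,
                                  /*deallocator_arg=*/nullptr);
      TFE_TensorHandle* handle = TFE_NewTensorHandle(t, status);
      TF_DeleteTensor(t);
      return handle;
    }
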
  6. tensorflow/c/eager/c_api_distributed_test.cc

    // Test to ensure that a registered graph optimization pass is only executed
    // once (i.e., on the main function side) in running distributed functions.
    // This test creates a cluster with two workers, creates a variable on the
    // second worker, and runs a distributed function (VariableAddFunction) whose ops
    // span the local and remote workers. If the graph optimization pass is executed
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 23.5K bytes
    - Viewed (0)
  7. tensorflow/c/experimental/next_pluggable_device/c_api.cc

          container_name, plugin_resource_name, &tf_plugin_resource,
          [plugin_resource_name, create_func, create_func_args,
           delete_func](tensorflow::PluginResource** new_resource) {
            void* opaque_plugin_resource = create_func(create_func_args);
            *new_resource = new tensorflow::PluginResource(
                opaque_plugin_resource, plugin_resource_name, delete_func);
    C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Tue Jan 09 00:52:04 GMT 2024
    - 13.9K bytes
    - Viewed (1)
  8. tensorflow/c/eager/parallel_device/parallel_device_test.cc

      RegisterParallelDevice(context.get(), device_name, underlying_devices,
                             status.get());
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    
      // Create two vectors with different lengths
      std::vector<float> size_two_value{1., 2.};
      std::vector<float> size_three_value{1., 2., 3.};
      TensorHandlePtr size_two(
          VectorFloatTensorHandle(size_two_value, status.get()));
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 08 23:47:35 GMT 2021
    - 29.3K bytes
    - Viewed (1)
  9. tensorflow/c/experimental/grappler/grappler_test.cc

      params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE;
      params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE;
      params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE;
      params->optimizer->create_func = nullptr;
      params->optimizer->optimize_func = optimize_func;
      params->optimizer->destroy_func = nullptr;
    }
    
    TEST(Grappler, SuccessfulRegistration) {
    C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Thu Apr 13 22:30:58 GMT 2023
    - 11.6K bytes
    - Viewed (0)
  10. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

          : status_(TF_NewStatus()),
            // If the context's default executor is set to async, re-using that in
            // each thread would cause collectives to deadlock. For consistency we
            // create a new sync executor for every thread.
            //
            // TODO(allenl): We should have an async API that works with the
            // parallel device.
            device_(device),
            executor_(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)