Results 1 - 9 of 9 for WithAssignedDevice (0.22 sec)

  1. tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc

    }
    
    TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnCpu) {
      Scope root = Scope::NewRootScope().ExitOnError();
      Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
      Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU);
    
      Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
      Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
    
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 8.4K bytes
  2. tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc

      *result = std::move(graph);
      return absl::OkStatus();
    }
    
    TEST(SliceToDynamicSliceRewriteTest, Basic) {
      Scope root = Scope::NewRootScope()
                       .ExitOnError()
                       .WithAssignedDevice(kDeviceName)
                       .WithXlaCluster("cluster_0");
    
      Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
      Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 18.3K bytes
  3. tensorflow/compiler/jit/xla_cluster_util_test.cc

    }
    
    TEST(IsSingleGpuGraph, ReturnsFalseForMultiGpuGraph) {
      Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError();
    
      Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
      Output b = ops::Add(root.WithOpName("b").WithAssignedDevice(kGPU1), a, a);
      Output c = ops::Add(root.WithOpName("c"), b, b);
    
      FixupSourceAndSinkEdges(root.graph());
    
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 10.8K bytes
  4. tensorflow/compiler/jit/build_xla_ops_pass.cc

            string cpu_device = "/job:localhost/replica:0/task:0/device:CPU:0";
            ops::Print print_op(s.WithOpName("print_", oidx)
                                    .WithDevice(cpu_device)
                                    .WithAssignedDevice(cpu_device),
                                new_output, {new_output},
                                ops::Print::Attrs{}
                                    .Message(absl::StrCat("output ", oidx, " from ",
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
  5. tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc

          NewInternalScope(g, &status, /*refiner=*/nullptr)
              .WithXlaCluster(string(cluster_name))
              .NewSubScope(absl::StrCat(slice->name(), "/static_shaped_slice"));
      Scope host_scope = main_scope.WithAssignedDevice(host_name);
    
      // In the future we may want to be clever here and avoid the extra Cast ops.
      SliceInputs slice_inputs_int64 =
          MakeSliceIndexAndSizeInt64(host_scope, slice_inputs);
    
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 13.8K bytes
  6. tensorflow/compiler/jit/xla_activity_listener_test.cc

      }
    
      TestListener* listener() const { return listener_; }
    
     private:
      TestListener* listener_;
    };
    
    GraphDef CreateGraphDef() {
      Scope root = Scope::NewRootScope().ExitOnError().WithAssignedDevice(
          "/job:localhost/replica:0/task:0/device:CPU:0");
      Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
      for (int i = 0; i < 5; i++) {
        a = ops::MatMul(root.WithOpName(absl::StrCat("matmul_", i)), a, a);
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.9K bytes
  7. tensorflow/compiler/jit/build_xla_ops_pass_test.cc

      const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:GPU:0";
      Scope root = Scope::NewRootScope()
                       .WithDevice(kXlaDeviceName)
                       .WithAssignedDevice(kXlaDeviceName)
                       .ExitOnError();
    
      FunctionDefLibrary fdef_lib =
          CreateFunctionDefLibWithInt32Input("cluster_int32");
      TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 12.2K bytes
  8. tensorflow/cc/framework/scope.h

      Scope WithDevice(const string& device) const;
    
      /// Returns a new scope.  All ops created within the returned scope will have
      /// their assigned device set to `assigned_device`.
      Scope WithAssignedDevice(const string& assigned_device) const;
    
      /// Returns a new scope.  All ops created within the returned scope will have
      /// their _XlaCluster attribute set to `xla_cluster`.
    - Last Modified: Sat Apr 13 09:08:33 UTC 2024
    - 10.5K bytes
  9. tensorflow/cc/framework/scope.cc

                            /* clear_control_deps */ true));
    }
    
    Scope Scope::WithDevice(const string& device) const {
      return Scope(new Impl(*this, Impl::Tags::Device(), device));
    }
    
    Scope Scope::WithAssignedDevice(const string& assigned_device) const {
      return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device));
    }
    
    Scope Scope::WithXlaCluster(const string& xla_cluster) const {
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 20.9K bytes
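
For reference, here is a minimal sketch of how the Scope::WithAssignedDevice API documented in result 8 (tensorflow/cc/framework/scope.h) is typically paired with WithDevice, following the pattern in the test files above. This is an illustrative, self-contained example, not code from any of the files listed; the op names are assumptions, and the CPU device string matches the one used in results 4 and 6.

    #include <string>

    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"
    #include "tensorflow/core/framework/graph.pb.h"
    #include "tensorflow/core/platform/status.h"

    using namespace tensorflow;

    int main() {
      // Root scope; ExitOnError makes graph-construction failures fatal.
      Scope root = Scope::NewRootScope().ExitOnError();

      // WithDevice sets the *requested* device for ops created in the scope,
      // while WithAssignedDevice records the device as if placement had
      // already happened (it sets the node's assigned device).
      const std::string kCpu = "/job:localhost/replica:0/task:0/device:CPU:0";
      Scope on_cpu = root.WithDevice(kCpu).WithAssignedDevice(kCpu);

      // Ops built through on_cpu inherit both device fields.
      Output a = ops::Placeholder(on_cpu.WithOpName("a"), DT_FLOAT);
      Output b = ops::Add(on_cpu.WithOpName("b"), a, a);

      GraphDef gdef;
      TF_CHECK_OK(root.ToGraphDef(&gdef));
      return 0;
    }

In the JIT test files above, setting the assigned device up front appears to stand in for running a real placement pass, so that graph rewrites which read a node's assigned device can be exercised directly.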