Results 1 - 10 of 34 for shared_ptr (0.21 sec)

  1. tensorflow/cc/framework/scope_internal.h

      // WithControlDependencies() would share the same NameMap with the parent.
      typedef std::unordered_map<string, int> NameMap;
    
      Impl(const std::shared_ptr<Graph>& graph,
           const std::shared_ptr<Status>& status,
           const std::shared_ptr<NameMap>& name_map,
           const std::shared_ptr<ShapeRefiner>& refiner);
    
      const string& name() const { return name_; }
      const std::vector<Operation>& control_deps() const { return control_deps_; }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 15:46:43 UTC 2022
    - 5.1K bytes
    - Viewed (0)
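    The Scope::Impl constructor above holds its graph, status, name map, and shape refiner through std::shared_ptr, so child scopes built from a parent share the same underlying state rather than copying it. A minimal standalone sketch of that pattern (ScopeLike and its members are hypothetical stand-ins, not TensorFlow's API):

      #include <memory>
      #include <string>
      #include <unordered_map>

      // Hypothetical stand-in for Scope::Impl: several scopes share one NameMap.
      using NameMap = std::unordered_map<std::string, int>;

      class ScopeLike {
       public:
        explicit ScopeLike(std::shared_ptr<NameMap> name_map)
            : name_map_(std::move(name_map)) {}

        // A "child" scope reuses the parent's map instead of copying it.
        ScopeLike NewChild() const { return ScopeLike(name_map_); }

        int NextId(const std::string& name) { return (*name_map_)[name]++; }

       private:
        std::shared_ptr<NameMap> name_map_;
      };

      int main() {
        ScopeLike parent(std::make_shared<NameMap>());
        ScopeLike child = parent.NewChild();
        parent.NextId("add");  // 0
        child.NextId("add");   // 1 -- same map, ownership is shared
      }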
  2. tensorflow/compiler/jit/xla_device_context.h

    class XlaDeviceContext : public DeviceContext {
     public:
      explicit XlaDeviceContext(
          std::shared_ptr<se::Stream> compute_stream,
          std::shared_ptr<se::Stream> host_to_device_stream,
          std::shared_ptr<se::Stream> device_to_host_stream,
          std::vector<std::shared_ptr<se::Stream>> device_to_device_streams,
          xla::LocalClient* client,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_device_context.cc

        return false;
      }
      return stream_executor_->ClearAllocatorStats();
    }
    
    XlaDeviceContext::XlaDeviceContext(
        std::shared_ptr<se::Stream> compute_stream,
        std::shared_ptr<se::Stream> host_to_device_stream,
        std::shared_ptr<se::Stream> device_to_host_stream,
        std::vector<std::shared_ptr<se::Stream>> device_to_device_streams,
        xla::LocalClient* client,
        XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
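    Entries 2 and 3 show XlaDeviceContext accepting each stream as a std::shared_ptr passed by value, so the context co-owns the streams alongside whoever created them. A minimal sketch of that constructor shape (Stream and DeviceContextLike are hypothetical stand-ins, not the StreamExecutor types):

      #include <memory>
      #include <utility>
      #include <vector>

      struct Stream {};  // hypothetical stand-in for se::Stream

      // shared_ptr parameters are taken by value and moved into members, so
      // callers keep their own handles while the context co-owns the streams.
      class DeviceContextLike {
       public:
        DeviceContextLike(std::shared_ptr<Stream> compute,
                          std::shared_ptr<Stream> host_to_device,
                          std::shared_ptr<Stream> device_to_host,
                          std::vector<std::shared_ptr<Stream>> device_to_device)
            : compute_(std::move(compute)),
              host_to_device_(std::move(host_to_device)),
              device_to_host_(std::move(device_to_host)),
              device_to_device_(std::move(device_to_device)) {}

       private:
        std::shared_ptr<Stream> compute_;
        std::shared_ptr<Stream> host_to_device_;
        std::shared_ptr<Stream> device_to_host_;
        std::vector<std::shared_ptr<Stream>> device_to_device_;
      };

      int main() {
        auto compute = std::make_shared<Stream>();
        DeviceContextLike ctx(compute, std::make_shared<Stream>(),
                              std::make_shared<Stream>(), {});
        // `compute` stays valid here; ownership is shared with ctx.
      }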
  4. tensorflow/cc/framework/scope.cc

          colocation_constraints_(),
          disable_shape_inference_(disable_shape_inference) {}
    
    Scope::Impl::Impl(const std::shared_ptr<Graph>& graph,
                      const std::shared_ptr<Status>& status,
                      const std::shared_ptr<NameMap>& name_map,
                      const std::shared_ptr<ShapeRefiner>& refiner)
        : graph_(graph),
          status_(status),
          name_map_(name_map),
          refiner_(refiner),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_kernel_creator.cc

    #include "tensorflow/core/lib/core/status.h"
    #include "tsl/platform/errors.h"
    
    namespace tensorflow {
    
    bool XlaKernelCreator::CanCreateKernel(
        const FunctionLibraryRuntime& flr,
        const std::shared_ptr<const NodeProperties>& props) const {
      return CanCreateXlaKernel(props->node_def) &&
             !XlaOpRegistry::IsCompilationDevice(flr.device()->device_type());
    }
    
    static Status CreateXlaKernel(FunctionLibraryRuntime* flr,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 22:24:01 UTC 2023
    - 4.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_kernel_creator.h

      bool CanCreateKernel(
          const FunctionLibraryRuntime& flr,
          const std::shared_ptr<const NodeProperties>& props) const override;
    
      // Given a supported NodeDef, returns a XlaLaunchOp that computes the node.
      Status CreateKernel(FunctionLibraryRuntime* flr,
                          const std::shared_ptr<const NodeProperties>& props,
                          std::unique_ptr<OpKernel>* kernel) const override;
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 26 19:43:17 UTC 2023
    - 1.8K bytes
    - Viewed (0)
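    Both declarations pass NodeProperties as const std::shared_ptr<const NodeProperties>&: the callee gets read-only shared access to the node without bumping the reference count on every call. A small sketch of the same signature style (NodeProps and CanHandle are hypothetical names):

      #include <iostream>
      #include <memory>
      #include <string>

      struct NodeProps {  // hypothetical stand-in for NodeProperties
        std::string op;
      };

      // Taking `const std::shared_ptr<const T>&` promises not to modify the
      // pointee and avoids copying the shared_ptr itself on each call.
      bool CanHandle(const std::shared_ptr<const NodeProps>& props) {
        return props && props->op == "MatMul";
      }

      int main() {
        auto props = std::make_shared<const NodeProps>(NodeProps{"MatMul"});
        std::cout << std::boolalpha << CanHandle(props) << "\n";  // true
      }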
  7. tensorflow/compiler/jit/xla_platform_info.h

      explicit XlaPlatformInfo(
          const DeviceType device_type, se::Platform::Id platform_id,
          const XlaDevice::Metadata* xla_device_metadata,
          const PjRtBaseDevice::Metadata* pjrt_device_metadata,
          std::shared_ptr<se::DeviceMemoryAllocator> device_allocator)
          : device_type_(device_type),
            platform_id_(platform_id),
            xla_device_metadata_(xla_device_metadata),
            pjrt_device_metadata_(pjrt_device_metadata),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_device.cc

        TF_RETURN_IF_ERROR(EnsureStreamOkLocked(backend, "stream", &stream_,
                                                &need_new_device_context));
      }
    
      std::shared_ptr<se::Stream> host_to_device_stream;
      std::shared_ptr<se::Stream> device_to_host_stream;
      std::vector<std::shared_ptr<se::Stream>> device_to_device_streams;
      if (use_multiple_streams_) {
        TF_RETURN_IF_ERROR(EnsureStreamOkLocked(backend, "host_to_device_stream",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  9. tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc

    bool RamFileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) {
      absl::MutexLock l(&block->mu);
      if (block->state != FetchState::FINISHED) {
        return true;  // No need to check for staleness.
      }
      if (max_staleness_ == 0) return true;  // Not enforcing staleness.
      return timer_seconds_() - block->timestamp <= max_staleness_;
    }
    
    std::shared_ptr<RamFileBlockCache::Block> RamFileBlockCache::Lookup(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 16 01:39:09 UTC 2020
    - 11.1K bytes
    - Viewed (0)
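    BlockNotStale above locks the shared block and compares its age against max_staleness_. A simplified standalone version of that check, with std::mutex and std::chrono standing in for absl::Mutex and the injected timer_seconds_ callback:

      #include <chrono>
      #include <memory>
      #include <mutex>

      struct Block {  // simplified stand-in for the cache block above
        std::mutex mu;
        bool finished = false;
        double timestamp = 0;  // seconds, from the same clock as NowSeconds()
      };

      double NowSeconds() {
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
      }

      // Same shape as BlockNotStale: lock the block, then compare its age
      // against a staleness bound (0 disables the check).
      bool BlockNotStale(const std::shared_ptr<Block>& block,
                         double max_staleness) {
        std::lock_guard<std::mutex> l(block->mu);
        if (!block->finished) return true;    // still being fetched
        if (max_staleness == 0) return true;  // staleness not enforced
        return NowSeconds() - block->timestamp <= max_staleness;
      }

      int main() {
        auto block = std::make_shared<Block>();
        block->finished = true;
        block->timestamp = NowSeconds();
        bool fresh = BlockNotStale(block, /*max_staleness=*/300.0);
        (void)fresh;
      }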
  10. tensorflow/cc/experimental/libtf/value.h

    class TaggedValueHash;
    using List = std::vector<TaggedValue>;
    using ListPtr = std::shared_ptr<List>;
    using Dict =
        absl::flat_hash_map<TaggedValue, TaggedValue, TaggedValueHash<TaggedValue>>;
    using DictPtr = std::shared_ptr<Dict>;
    using TuplePtr = std::shared_ptr<Tuple>;
    using Func =
        std::function<absl::StatusOr<TaggedValue>(TaggedValue, TaggedValue)>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:23:45 UTC 2024
    - 20.4K bytes
    - Viewed (0)
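    The aliases above wrap each container in std::shared_ptr so lists, dicts, and tuples of TaggedValue can be handed around by reference instead of copied. A simplified sketch of the same idiom, with std::unordered_map and std::string standing in for absl::flat_hash_map and TaggedValue:

      #include <memory>
      #include <string>
      #include <unordered_map>
      #include <vector>

      using Value = std::string;  // stand-in for TaggedValue
      using List = std::vector<Value>;
      using ListPtr = std::shared_ptr<List>;
      using Dict = std::unordered_map<Value, Value>;
      using DictPtr = std::shared_ptr<Dict>;

      int main() {
        ListPtr list = std::make_shared<List>();
        list->push_back("hello");

        // Two handles to the same dictionary: a mutation through one is
        // visible through the other, which is why the aliases share the
        // containers rather than copying them by value.
        DictPtr a = std::make_shared<Dict>();
        DictPtr b = a;
        (*a)["key"] = "value";
        // b->at("key") == "value"
      }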