Results 1 - 9 of 9 for add_tensor (0.42 sec)

  1. tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc

          }
        } else if (auto attr = mlir::dyn_cast<mlir::ElementsAttr>(a)) {
          TensorProto tensor;
          TF_RETURN_IF_ERROR(ConvertToTensorProto(attr, &tensor));
          *list->add_tensor() = tensor;
        } else if (auto attr = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(a)) {
          AttrValue attr_val;
          TF_RETURN_IF_ERROR(ConvertAttribute(attr, &attr_val));
          *list->add_func() = attr_val.func();
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 19.7K bytes
  2. tensorflow/c/kernels_test.cc

        return static_cast<void*>(s);
      };
    
      AttrValue v;
      ::tensorflow::TensorProto* tensor_proto1 = v.mutable_list()->add_tensor();
      *tensor_proto1 = TensorProtoHelpers::GenerateTensorProto1();
    
      ::tensorflow::TensorProto* tensor_proto2 = v.mutable_list()->add_tensor();
      *tensor_proto2 = TensorProtoHelpers::GenerateTensorProto2();
    
      CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrTensorList", v);
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 50.4K bytes
  3. tensorflow/c/eager/dlpack.cc

        return nullptr;
      }
      TF_DataType dtype;
      Status s = TfDataTypeFormDlDataType(dl_tensor->dtype, &dtype);
      if (!s.ok()) {
        status->status = std::move(s);
        return nullptr;
      }
      int num_dims = dl_tensor->ndim;
      const int64_t* dims = dl_tensor->shape;
      void* data = dl_tensor->data;
    
      if (dl_tensor->byte_offset != 0) {
        status->status = tensorflow::errors::InvalidArgument(
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 12.8K bytes
  4. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc

      tensorflow::AsyncValueTensor* av_tensor =
          tensorflow::AsyncValueTensor::FromTensor(tensor);
      if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
        return absl::InternalError("Input tensor does not have PjRtBuffer.");
      }
      auto* c_api_buffer =
          dynamic_cast<xla::PjRtCApiBuffer*>(av_tensor->GetBuffer().get());
      if (c_api_buffer == nullptr) {
        return absl::InternalError(
    - Last Modified: Mon Apr 22 05:48:24 UTC 2024
    - 3.7K bytes
  5. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc

              xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
              nullptr, pjrt_client->addressable_devices()[0]));
      tensorflow::AsyncValueTensor* av_tensor =
          tensorflow::AsyncValueTensor::FromTensor(&tensor);
      av_tensor->SetBuffer(std::move(buffer));
    
      EXPECT_THAT(
          GetPjRtCBufferFromTensor(&tensor),
          StatusIs(
              error::INTERNAL,
              HasSubstr(absl::StrCat(
    - Last Modified: Mon Apr 01 16:29:40 UTC 2024
    - 7.2K bytes
  6. tensorflow/c/experimental/next_pluggable_device/c_api.cc

                                                      TF_Status* status) {
      auto* cc_ctx = reinterpret_cast<tensorflow::OpKernelContext*>(ctx);
      const tensorflow::Tensor& arg_tensor = cc_ctx->input(index);
      absl::Status cc_status;
      if (arg_tensor.dtype() != tensorflow::DT_RESOURCE) {
        cc_status = absl::InvalidArgumentError(
            absl::StrCat("Trying to obtain resource handle from Input[", index,
    - Last Modified: Mon Apr 22 05:48:24 UTC 2024
    - 13.9K bytes
  7. tensorflow/compiler/jit/xla_launch_util.cc

        //
        // 3. AsyncValueTensor, containing a PjRtBuffer. This is the legacy mode
        // and certain device type (e.g. TPU) still uses this path.
        AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor);
        if (use_pjrt_tensor_buffer) {
          if (av_tensor != nullptr) {
            return absl::InvalidArgumentError(
                "If use_pjrt_tensor_buffer is set, the input tensor should not "
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
  8. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

      auto begin_tensor = CreateI32DenseConst(builder, begin, loc);
      auto end_tensor = CreateI32DenseConst(builder, end, loc);
      auto strides_tensor = CreateI32DenseConst(builder, strides, loc);
    
      return builder->create<TF::StridedSliceOp>(
          loc, output_type, input, begin_tensor, end_tensor, strides_tensor,
          builder->getI64IntegerAttr(begin_mask),
          builder->getI64IntegerAttr(end_mask),
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
  9. tensorflow/compiler/mlir/lite/schema/schema_generated.h

    };
    
    struct SubGraphBuilder {
      typedef SubGraph Table;
      ::flatbuffers::FlatBufferBuilder &fbb_;
      ::flatbuffers::uoffset_t start_;
      void add_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Tensor>>> tensors) {
        fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
      }
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 1M bytes
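
Usage note (results 1 and 2): add_tensor() there is the protobuf-generated accessor on AttrValue::ListValue; each call appends a new TensorProto to the repeated "tensor" field and returns a mutable pointer for in-place filling. A minimal sketch of that pattern, assuming a standard TensorFlow C++ build (the function name MakeTensorListAttr is invented for illustration):

    #include "tensorflow/core/framework/attr_value.pb.h"
    #include "tensorflow/core/framework/tensor.pb.h"
    #include "tensorflow/core/framework/types.pb.h"

    // Builds an AttrValue holding a list of two TensorProtos, mirroring the
    // pattern in kernels_test.cc above. Sketch only, not TensorFlow API docs.
    tensorflow::AttrValue MakeTensorListAttr() {
      tensorflow::AttrValue value;

      // add_tensor() appends an empty TensorProto and returns a pointer to it.
      tensorflow::TensorProto* first = value.mutable_list()->add_tensor();
      first->set_dtype(tensorflow::DT_FLOAT);
      first->add_float_val(1.0f);

      tensorflow::TensorProto* second = value.mutable_list()->add_tensor();
      second->set_dtype(tensorflow::DT_INT32);
      second->add_int_val(42);

      return value;
    }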
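
Usage note (result 9): in the generated TFLite schema, add_tensors() on SubGraphBuilder is part of the FlatBuffers builder pattern; it records the offset of an already-serialized vector of Tensor tables in the SubGraph table under SubGraph::VT_TENSORS. A sketch of how such a builder is typically driven, assuming the generated header above (BuildEmptySubGraph is a made-up helper, and the tensor vector is left empty to keep it minimal):

    #include <vector>

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"

    // Serializes a SubGraph table whose "tensors" vector is empty.
    flatbuffers::Offset<tflite::SubGraph> BuildEmptySubGraph(
        flatbuffers::FlatBufferBuilder& fbb) {
      // The vector of Tensor tables must be serialized before the SubGraph
      // table that refers to it.
      std::vector<flatbuffers::Offset<tflite::Tensor>> tensor_offsets;
      auto tensors = fbb.CreateVector(tensor_offsets);

      tflite::SubGraphBuilder builder(fbb);
      builder.add_tensors(tensors);  // stores the offset at SubGraph::VT_TENSORS
      return builder.Finish();
    }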