Results 11 - 20 of 336 for shape_ (0.09 sec)

  1. tensorflow/c/eager/immediate_execution_tensor_handle.h

      //
      // -1 indicates an unknown axis length; this is unreachable for most standard
      // ImmediateExecutionTensorHandles, but comes up for example when computing
      // the shape of a parallel tensor with component shapes differing across
      // devices.
      virtual absl::Status Dim(int dim_index, int64_t* dim) const = 0;
    
      // Returns the device which created the handle.
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Sat Oct 12 05:11:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)
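    A hedged aside, not part of the indexed header: the Dim() contract above is also reachable through the public eager C API via TFE_TensorHandleNumDims and TFE_TensorHandleDim. A minimal sketch, assuming a TFE_TensorHandle* h and TF_Status* status already exist:

      int num_dims = TFE_TensorHandleNumDims(h, status);
      if (TF_GetCode(status) == TF_OK) {
        for (int i = 0; i < num_dims; ++i) {
          int64_t dim = TFE_TensorHandleDim(h, i, status);
          if (TF_GetCode(status) != TF_OK) break;
          // Per the comment above, -1 here would mean the axis length is
          // unknown, e.g. a parallel tensor whose component shapes differ.
          (void)dim;
        }
      }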
  2. tensorflow/c/c_api_experimental.h

        TF_ShapeAndTypeList** shape_list_array, int num_items);
    
    // Infer shapes for the given `op`. The arguments mimic the arguments of the
    // `shape_inference::InferenceContext` constructor. Note the following:
    //   - The inputs of the `op` are not used for shape inference. So, it is
    //     OK to not have the inputs properly set in `op`. See `input_tensors`
    //     if you want shape inference to consider the input tensors of the
    //     op for shape inference.
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Thu Apr 27 21:07:00 UTC 2023
    - 15.1K bytes
    - Viewed (0)
  3. tensorflow/c/eager/dlpack_test.cc

      dltensor_in->device = {kDLCPU, 0};
      dltensor_in->ndim = static_cast<int32_t>(shape.size());
      dltensor_in->dtype = {kDLFloat, 32, 1};
      dltensor_in->shape = shape.data();
      dltensor_in->strides = strides.data();
      TFE_TensorHandle* handle = TFE_HandleFromDLPack(&dlm_in, status, ctx);
      ASSERT_NE(handle, nullptr)
          << TF_Message(status) << " (shape=[" << absl::StrJoin(shape, ",")
          << "], strides=[" << absl::StrJoin(strides, ",") << "])";
    
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Fri Jun 30 03:04:46 UTC 2023
    - 4.4K bytes
    - Viewed (0)
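    A hedged aside, not from the indexed test: the same dlpack.h API can round-trip an existing handle instead of a hand-built DLTensor. A minimal sketch, assuming TFE_Context* ctx, TFE_TensorHandle* handle, and TF_Status* status already exist, and that TFE_HandleFromDLPack takes ownership of the capsule on success:

      // Export the handle as a DLManagedTensor (returned as an opaque void*).
      void* dlm = TFE_HandleToDLPack(handle, status);
      if (TF_GetCode(status) == TF_OK) {
        // Import it back, mirroring the TFE_HandleFromDLPack call in the test above.
        TFE_TensorHandle* roundtrip = TFE_HandleFromDLPack(dlm, status, ctx);
        if (TF_GetCode(status) != TF_OK) {
          // On failure the capsule is still ours to release.
          TFE_CallDLManagedTensorDeleter(dlm);
        } else {
          TFE_DeleteTensorHandle(roundtrip);
        }
      }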
  4. cmd/shared-lock.go

    Anis Elleuch <******@****.***> 1676280398 +0100
    Registered: Sun Nov 03 19:28:11 UTC 2024
    - Last Modified: Mon Feb 13 09:26:38 UTC 2023
    - 2.3K bytes
    - Viewed (0)
  5. tensorflow/c/eager/c_api_test_util.h

    TFE_Op* MatMulOp(TFE_Context* ctx, TFE_TensorHandle* a, TFE_TensorHandle* b);
    
    // Return an identity op.
    TFE_Op* IdentityOp(TFE_Context* ctx, TFE_TensorHandle* a);
    
    // Return a shape op fetching the shape of `a`.
    TFE_Op* ShapeOp(TFE_Context* ctx, TFE_TensorHandle* a);
    
    // Return an allreduce op adding up input tensor `in` from `group_size` workers.
    TFE_Op* AllReduceOp(TFE_Context* ctx, TFE_TensorHandle* in, int group_size);
    
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Mon Jul 17 23:43:59 UTC 2023
    - 7.7K bytes
    - Viewed (0)
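    A hedged aside: a short usage sketch for the ShapeOp helper declared above, following the same TFE_Execute pattern as the c_api_test.cc excerpt in result 9. It assumes TFE_Context* ctx, TFE_TensorHandle* a, and TF_Status* status already exist:

      TFE_Op* shape_op = ShapeOp(ctx, a);
      TFE_TensorHandle* retvals[1];
      int num_retvals = 1;
      TFE_Execute(shape_op, &retvals[0], &num_retvals, status);
      if (TF_GetCode(status) == TF_OK) {
        // Resolve the result to a host TF_Tensor holding the shape vector.
        TF_Tensor* shape_tensor = TFE_TensorHandleResolve(retvals[0], status);
        if (TF_GetCode(status) == TF_OK) TF_DeleteTensor(shape_tensor);
        TFE_DeleteTensorHandle(retvals[0]);
      }
      TFE_DeleteOp(shape_op);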
  6. tensorflow/c/eager/abstract_tensor_handle.h

      virtual absl::Status TensorHandleStatus() const;
    
      // Returns tensor shape. If tensor has unknown rank, shape remains untouched.
      virtual absl::Status Shape(tensorflow::PartialTensorShape* shape) const = 0;
    
      // Returns tensor (full) type.
      // While there is no immediate plan to deprecate dtype and shape in favor
      // of only using full type type information, this is a future possibility.
      //
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Sat Oct 12 05:11:17 UTC 2024
    - 3K bytes
    - Viewed (0)
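    A hedged aside: a minimal sketch of the Shape() contract above, assuming `handle` points to some AbstractTensorHandle implementation. A default-constructed PartialTensorShape has unknown rank, so if Shape() leaves it untouched the unknown_rank() check below catches that case:

      tensorflow::PartialTensorShape shape;
      absl::Status s = handle->Shape(&shape);
      if (s.ok() && !shape.unknown_rank()) {
        for (int i = 0; i < shape.dims(); ++i) {
          // Individual axes can still be unknown; dim_size() reports them as -1.
          int64_t d = shape.dim_size(i);
          (void)d;
        }
      }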
  7. tensorflow/c/eager/c_api_unified_experimental.cc

        return nullptr;
      }
      tensorflow::PartialTensorShape partial_shape;
      if (shape.num_dims != -1) {
        DCHECK(shape.dim_sizes != nullptr);
        Status status = tensorflow::PartialTensorShape::MakePartialShape(
            reinterpret_cast<int64_t*>(shape.dim_sizes), shape.num_dims,
            &partial_shape);
        if (!status.ok()) {
          tsl::Set_TF_Status_from_Status(s, status);
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Sat Oct 12 05:11:17 UTC 2024
    - 9K bytes
    - Viewed (0)
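    A hedged aside: the conversion above hinges on PartialTensorShape::MakePartialShape, where a dimension of -1 marks an unknown axis. A minimal standalone sketch (the [-1, 128] shape is illustrative, not from the indexed file):

      #include "tensorflow/core/framework/partial_tensor_shape.h"

      int64_t dims[] = {-1, 128};
      tensorflow::PartialTensorShape partial_shape;
      absl::Status status = tensorflow::PartialTensorShape::MakePartialShape(
          dims, /*n=*/2, &partial_shape);
      // On success, partial_shape.DebugString() prints something like "[?,128]".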
  8. RELEASE.md

    *   Tracing/timeline support for distributed runtime (no GPU profiler yet).
    *   C API gives access to inferred shapes with `TF_GraphGetTensorNumDims` and
        `TF_GraphGetTensorShape`.
    *   Shape functions for core ops have moved to C++ via
        `REGISTER_OP(...).SetShapeFn(...)`. Python shape inference RegisterShape
        calls use the C++ shape functions with `common_shapes.call_cpp_shape_fn`. A
        future release will remove `RegisterShape` from python.
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Tue Oct 22 14:33:53 UTC 2024
    - 735.3K bytes
    - Viewed (0)
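    A hedged aside: the release note mentions reading inferred shapes back through TF_GraphGetTensorNumDims and TF_GraphGetTensorShape. A minimal sketch using an illustrative Placeholder (op and attribute values here are assumptions, not from the indexed file):

      TF_Status* status = TF_NewStatus();
      TF_Graph* graph = TF_NewGraph();
      TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "x");
      TF_SetAttrType(desc, "dtype", TF_FLOAT);
      const int64_t shape[] = {-1, 128};
      TF_SetAttrShape(desc, "shape", shape, 2);
      TF_Operation* x = TF_FinishOperation(desc, status);
      TF_Output out = {x, 0};
      // Query what shape inference recorded for the placeholder's output.
      int num_dims = TF_GraphGetTensorNumDims(graph, out, status);  // 2
      int64_t dims[2];
      TF_GraphGetTensorShape(graph, out, dims, num_dims, status);   // {-1, 128}
      TF_DeleteGraph(graph);
      TF_DeleteStatus(status);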
  9. tensorflow/c/eager/c_api_test.cc

        ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    
        TFE_Op* shape_op = ShapeOp(ctx, hgpu);
        TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
        ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
        TFE_TensorHandle* retvals[1];
        int num_retvals = 1;
        TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Thu Aug 03 20:50:20 UTC 2023
    - 94.6K bytes
    - Viewed (0)
  10. tensorflow/c/eager/parallel_device/parallel_device.cc

    // number of dimensions of a parallel tensor.
    int ParallelTensorNumDims(void* data, TF_Status* status) {
      const std::vector<int64_t>* shape;
      absl::Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
      if (!s.ok()) {
        tsl::Set_TF_Status_from_Status(status, s);
        return -1;
      }
      return shape->size();
    }
    
    // Used as an argument to TFE_NewCustomDeviceTensorHandle, for computing a
    // dimension of a parallel tensor.
    Registered: Tue Nov 05 12:39:12 UTC 2024
    - Last Modified: Mon Oct 21 04:14:14 UTC 2024
    - 18.3K bytes
    - Viewed (0)