Results 1 - 4 of 4 for xaxis (0.12 sec)

  1. tensorflow/c/eager/c_api_experimental.h

      // retrieving shapes of tensors they wrap until the custom device tensor's
      // shape is explicitly requested where possible.
      int (*num_dims)(void* data, TF_Status* status);
    
      // Computes the axis length at `dim_index`.
      int64_t (*dim)(void* data, int dim_index, TF_Status* status);
    
      void (*deallocator)(void* data);
    
      // Summarizes the value of this tensor. The caller takes ownership of the
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 39.5K bytes
    - Viewed (0)
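
    The excerpt shows the shape callbacks (num_dims, dim) and the deallocator a custom device supplies for a tensor handle it wraps. A minimal sketch of callbacks matching those signatures is below; the HypotheticalTensorData struct and how the callbacks get registered with a custom device are assumptions for illustration, not part of the header shown.

      // Sketch of callbacks with the signatures shown above. The backing
      // HypotheticalTensorData struct is an assumption for this example.
      #include <cstdint>
      #include <vector>

      #include "tensorflow/c/tf_status.h"

      struct HypotheticalTensorData {
        std::vector<int64_t> shape;  // Shape known up front in this sketch.
      };

      // Reports the rank of the wrapped tensor.
      int NumDims(void* data, TF_Status* status) {
        TF_SetStatus(status, TF_OK, "");
        return static_cast<int>(
            static_cast<HypotheticalTensorData*>(data)->shape.size());
      }

      // Reports the axis length at `dim_index`.
      int64_t Dim(void* data, int dim_index, TF_Status* status) {
        auto* d = static_cast<HypotheticalTensorData*>(data);
        if (dim_index < 0 || dim_index >= static_cast<int>(d->shape.size())) {
          TF_SetStatus(status, TF_INVALID_ARGUMENT, "dim_index out of range");
          return -1;
        }
        TF_SetStatus(status, TF_OK, "");
        return d->shape[dim_index];
      }

      // Frees the backing data when the handle is destroyed.
      void Deallocator(void* data) {
        delete static_cast<HypotheticalTensorData*>(data);
      }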
  2. tensorflow/c/eager/c_api_test_util.cc

    TFE_Op* MinOp(TFE_Context* ctx, TFE_TensorHandle* input,
                  TFE_TensorHandle* axis) {
      TF_Status* status = TF_NewStatus();
    
      TFE_Op* op = TFE_NewOp(ctx, "Min", status);
      CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_OpAddInput(op, input, status);
      CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_OpAddInput(op, axis, status);
      CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 23.5K bytes
    - Viewed (2)
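
    The excerpt builds a Min op with the eager C API but is cut off before the op runs. A hedged sketch of the usual continuation, executing the op and resolving the scalar result on the host, follows; the RunMin wrapper name and the read-back step are assumptions for this example, not code from the file.

      // Assumes the caller already owns a context and float input/axis handles,
      // as in the test utilities above.
      #include <cstring>

      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/core/platform/logging.h"

      // Runs Min(input, axis) eagerly and returns the first result element.
      float RunMin(TFE_Context* ctx, TFE_TensorHandle* input,
                   TFE_TensorHandle* axis) {
        TF_Status* status = TF_NewStatus();

        TFE_Op* op = TFE_NewOp(ctx, "Min", status);
        CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
        TFE_OpAddInput(op, input, status);
        CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
        TFE_OpAddInput(op, axis, status);
        CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

        // Execute the op and take ownership of the single output handle.
        TFE_TensorHandle* retval = nullptr;
        int num_retvals = 1;
        TFE_Execute(op, &retval, &num_retvals, status);
        CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
        TFE_DeleteOp(op);

        // Copy the result back to the host and read the first element.
        TF_Tensor* resolved = TFE_TensorHandleResolve(retval, status);
        CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
        float value = 0.f;
        std::memcpy(&value, TF_TensorData(resolved), sizeof(value));

        TF_DeleteTensor(resolved);
        TFE_DeleteTensorHandle(retval);
        TF_DeleteStatus(status);
        return value;
      }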
  3. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

          } else {
            // Generalize differing axis lengths to "variable"/"unknown".
            for (int axis_index = 0; axis_index < combined_shape.dims();
                 ++axis_index) {
              int64_t axis_length = combined_shape.dim_size(axis_index);
              if (axis_length != component_shape.dim_size(axis_index)) {
                axis_length = -1;
              }
              TF_RETURN_IF_ERROR(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
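
    The loop in the excerpt generalizes any axis whose length differs between component shapes to -1 ("variable"/"unknown"). A self-contained sketch of the same idea over plain vectors of dimension sizes is below; the CombineShapes name and the vector<int64_t> representation are illustrative stand-ins (the snippet itself operates on a shape object with dims()/dim_size()).

      #include <cstdint>
      #include <optional>
      #include <vector>

      // Combines per-component shapes into one shape, marking any axis whose
      // length differs across components as -1. Returns nullopt if the ranks
      // differ or there are no components.
      std::optional<std::vector<int64_t>> CombineShapes(
          const std::vector<std::vector<int64_t>>& component_shapes) {
        if (component_shapes.empty()) return std::nullopt;
        std::vector<int64_t> combined_shape = component_shapes[0];
        for (const auto& component_shape : component_shapes) {
          // Differing ranks cannot be combined into a single static rank.
          if (component_shape.size() != combined_shape.size()) return std::nullopt;
          for (size_t axis_index = 0; axis_index < combined_shape.size();
               ++axis_index) {
            // Generalize differing axis lengths to "variable"/"unknown".
            if (combined_shape[axis_index] != component_shape[axis_index]) {
              combined_shape[axis_index] = -1;
            }
          }
        }
        return combined_shape;
      }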
  4. tensorflow/c/eager/parallel_device/parallel_device.cc

            // Non-parallel tensors from _EagerConst/tf.constant are implicitly
            // broadcast, i.e. set as the input to each parallel operation. This
            // allows code like "tf.constant(1.)" or "tf.reduce_sum(..., axis=1)"
            // (where the value starts on the host), without allowing other implicit
            // copies/broadcasts. Other implicit copies may be supported eventually,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 29 22:05:31 GMT 2023
    - 18.3K bytes
    - Viewed (0)
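
    The comment describes the one implicit broadcast the parallel device allows: a non-parallel constant is reused as the input to each per-device operation. A toy sketch of that rule is below; DeviceValue and BroadcastToDevices are illustrative stand-ins for this example, not the library's actual types.

      #include <cstddef>
      #include <vector>

      struct DeviceValue {
        float value;  // Stand-in for a per-device tensor handle.
      };

      // Returns one copy of `host_value` per underlying device, mirroring how a
      // non-parallel _EagerConst/tf.constant input is implicitly set as the
      // input to each parallel operation.
      std::vector<DeviceValue> BroadcastToDevices(float host_value,
                                                  std::size_t num_devices) {
        return std::vector<DeviceValue>(num_devices, DeviceValue{host_value});
      }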