Results 1 - 10 of 75 for X_dims (0.19 sec)

  1. tensorflow/c/experimental/gradients/nn_grad_test.cc

      float X_vals[] = {1.0f, 2.0f, 3.0f, -5.0f, -4.0f, -3.0f, 2.0f, 10.0f, -1.0f};
      int64_t X_dims[] = {3, 3};
      AbstractTensorHandlePtr X;
      {
        AbstractTensorHandle* X_raw;
        status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
            immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
        ASSERT_EQ(errors::OK, status_.code()) << status_.message();
        X.reset(X_raw);
      }
    
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 8.3K bytes
    - Viewed (0)
  2. tensorflow/c/c_api_test.cc

      EXPECT_EQ(2, num_dims);
    
      // Resize the dimension vector appropriately.
      int64_t returned_dims[2];
      TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s);
      ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
      EXPECT_EQ(dims[0], returned_dims[0]);
      EXPECT_EQ(dims[1], returned_dims[1]);
    
      // Set to a new valid shape: [2, 3]
      dims[1] = 3;
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 96.9K bytes
    - Viewed (3)
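    The excerpt above checks a tensor's rank and dimensions and then assigns a new shape. As a point of reference, here is a minimal standalone sketch of that same query/set pattern using the public TensorFlow C API; it is not taken from the indexed file, the placeholder name and the [2, 2] shape are illustrative, and error handling is abbreviated.

      // Sketch: query and update a graph tensor's shape via the TF C API.
      #include <cstdio>
      #include "tensorflow/c/c_api.h"

      int main() {
        TF_Status* s = TF_NewStatus();
        TF_Graph* graph = TF_NewGraph();

        // Illustrative placeholder with a known shape [2, 2].
        TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "feed");
        TF_SetAttrType(desc, "dtype", TF_FLOAT);
        int64_t dims[2] = {2, 2};
        TF_SetAttrShape(desc, "shape", dims, 2);
        TF_Operation* feed = TF_FinishOperation(desc, s);
        if (TF_GetCode(s) != TF_OK) {
          std::fprintf(stderr, "%s\n", TF_Message(s));
          return 1;
        }
        TF_Output feed_out_0 = {feed, 0};

        // Same pattern as the excerpt: rank first, then the dimension values.
        int num_dims = TF_GraphGetTensorNumDims(graph, feed_out_0, s);
        int64_t returned_dims[2];
        TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s);
        std::printf("shape: [%lld, %lld]\n",
                    static_cast<long long>(returned_dims[0]),
                    static_cast<long long>(returned_dims[1]));

        // Set a new valid shape [2, 3], as the test does after the check.
        dims[1] = 3;
        TF_GraphSetTensorShape(graph, feed_out_0, dims, 2, s);

        TF_DeleteGraph(graph);
        TF_DeleteStatus(s);
        return 0;
      }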
  3. tensorflow/c/experimental/gradients/tape/tape_operation.cc

                              num_dims_i, " dimensions which is over the limit of ",
                              TensorShape::MaxDimensions(), "."));
        }
        if (num_dims_i < 0) {
          proto[i].set_unknown_rank(true);
        } else {
          const int64_t* dims_i = dims[i];
          auto proto_i = &proto[i];
          for (int d = 0; d < num_dims_i; ++d) {
            proto_i->add_dim()->set_size(dims_i[d]);
          }
        }
      }
    C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Tue Jun 07 01:53:35 GMT 2022
    - 9K bytes
    - Viewed (1)
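    The excerpt above fills one TensorShapeProto per input: unknown rank when the dimension count is negative, otherwise one Dim entry per dimension. A small standalone sketch of that pattern, with an illustrative helper name and using the generated proto API from tensor_shape.pb.h:

      // Sketch: build a TensorShapeProto the same way the excerpt does.
      #include <cstdint>
      #include "tensorflow/core/framework/tensor_shape.pb.h"

      tensorflow::TensorShapeProto ShapeProtoFor(const int64_t* dims, int num_dims) {
        tensorflow::TensorShapeProto proto;
        if (num_dims < 0) {
          proto.set_unknown_rank(true);  // the rank itself is not known
        } else {
          for (int d = 0; d < num_dims; ++d) {
            proto.add_dim()->set_size(dims[d]);  // -1 marks an unknown dimension size
          }
        }
        return proto;
      }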
  4. tensorflow/c/c_api_experimental.cc

                                     const int64_t* dims, int num_dims) {
      DCHECK(index >= 0 && index < shape_list->num_items);
      TF_ShapeAndType& shape = shape_list->items[index];
      DCHECK(shape.dims == nullptr) << "Shape at " << index << " is already set!";
      DCHECK(num_dims >= 0) << "Number of dimensions cannot be negative!";
      shape.num_dims = num_dims;
      shape.dims = new int64_t[num_dims];
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
  5. tensorflow/c/eager/gradients.cc

                              num_dims_i, " dimensions which is over the limit of ",
                              TensorShape::MaxDimensions(), "."));
        }
        if (num_dims_i < 0) {
          proto[i].set_unknown_rank(true);
        } else {
          const int64_t* dims_i = dims[i];
          auto proto_i = &proto[i];
          for (int d = 0; d < num_dims_i; ++d) {
            proto_i->add_dim()->set_size(dims_i[d]);
          }
        }
      }
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
  6. tensorflow/c/c_test_util.cc

    }
    
    TF_Tensor* Int8Tensor(const int64_t* dims, int num_dims, const char* values) {
      int64_t num_values = 1;
      for (int i = 0; i < num_dims; ++i) {
        num_values *= dims[i];
      }
      TF_Tensor* t =
          TF_AllocateTensor(TF_INT8, dims, num_dims, sizeof(char) * num_values);
      memcpy(TF_TensorData(t), values, sizeof(char) * num_values);
      return t;
    }
    
    TF_Tensor* Int32Tensor(const int64_t* dims, int num_dims,
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Fri Oct 15 03:16:52 GMT 2021
    - 17.8K bytes
    - Viewed (2)
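    The Int8Tensor helper in the excerpt above is a thin wrapper over TF_AllocateTensor plus a memcpy into TF_TensorData. A hedged, self-contained sketch of the same allocation pattern with the public C API (the 2x3 shape and values are illustrative, not from the indexed file):

      // Sketch: allocate a 2x3 TF_INT8 tensor, copy values in, read shape back.
      #include <cstdio>
      #include <cstring>
      #include "tensorflow/c/c_api.h"

      int main() {
        const int64_t dims[2] = {2, 3};
        const char values[6] = {1, 2, 3, 4, 5, 6};

        // Same allocation pattern as Int8Tensor in the excerpt.
        TF_Tensor* t = TF_AllocateTensor(TF_INT8, dims, 2, sizeof(values));
        std::memcpy(TF_TensorData(t), values, sizeof(values));

        std::printf("rank=%d, dim0=%lld, dim1=%lld, bytes=%zu\n",
                    TF_NumDims(t),
                    static_cast<long long>(TF_Dim(t, 0)),
                    static_cast<long long>(TF_Dim(t, 1)),
                    TF_TensorByteSize(t));

        TF_DeleteTensor(t);
        return 0;
      }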
  7. tensorflow/c/experimental/gradients/grad_test_helper.cc

                          absl::Span<const int64_t> dims, double abs_error) {
      TF_Tensor* analytical_tensor;
      auto s = GetValue(t, &analytical_tensor);
      ASSERT_EQ(errors::OK, s.code()) << s.message();
    
      int64_t num_elem_analytical = 1;
      auto num_dims_analytical = TF_NumDims(analytical_tensor);
      ASSERT_EQ(dims.size(), num_dims_analytical);
      for (int j = 0; j < num_dims_analytical; j++) {
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5K bytes
    - Viewed (0)
  8. tensorflow/c/eager/gradient_checker.cc

      AbstractTensorHandlePtr sum_dims;
      {
        vector<int32_t> vals(num_dims_out);
        int64_t vals_shape[] = {num_dims_out};
        Range(&vals, 0, num_dims_out);
        AbstractTensorHandle* sum_dims_raw = nullptr;
        TF_RETURN_IF_ERROR(TestTensorHandleWithDims<int32_t, TF_INT32>(
            ctx, vals.data(), vals_shape, 1, &sum_dims_raw));
        sum_dims.reset(sum_dims_raw);
      }
    
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 7.3K bytes
    - Viewed (0)
  9. tensorflow/c/experimental/gradients/math_grad_test.cc

      GTEST_SKIP();
    
      float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
      int64_t A_dims[] = {3, 3};
      AbstractTensorHandlePtr A;
      {
        AbstractTensorHandle* A_raw;
        status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
            immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
        ASSERT_EQ(errors::OK, status_.code()) << status_.message();
        A.reset(A_raw);
      }
    
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Thu Apr 13 17:32:14 GMT 2023
    - 16.3K bytes
    - Viewed (0)
  10. tensorflow/c/experimental/gradients/nn_grad.cc

      ImmediateTensorHandlePtr dim(imm_ctx->CreateLocalHandle(minus_1.get()));
      AbstractTensorHandle* expand_dims_outputs;
      TF_RETURN_IF_ERROR(
          ops::ExpandDims(ctx, vec, dim.get(), &expand_dims_outputs, "ExpandDims"));
      TF_RETURN_IF_ERROR(
          ops::Mul(ctx, expand_dims_outputs, mat, &outputs[0], "Mul"));
      expand_dims_outputs->Unref();
      return absl::OkStatus();
    }
    
    class SparseSoftmaxCrossEntropyWithLogitsGradientFunction
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5.7K bytes
    - Viewed (0)