Results 41 - 50 of 66 for shale (0.13 sec)

  1. SECURITY.md

    inspected and debugged and it is intended to be used during the development
    phase.
    
    As part of the differences that make Eager mode easier to debug, the [shape
    inference
    functions](https://www.tensorflow.org/guide/create_op#define_the_op_interface)
    are skipped, and any checks implemented inside the shape inference code are not
    executed.
    
    The security impact of skipping those checks should be low, since the attack
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Sun Oct 01 06:06:35 GMT 2023
    - 9.6K bytes
    - Viewed (0)
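To illustrate the behaviour described in the SECURITY.md excerpt above, here is a minimal Python sketch (assumption: a current TensorFlow 2.x release). With static shapes, tracing the call through `tf.function` runs the registered shape inference function and rejects the op before any kernel executes, while the same call in eager mode only fails when the MatMul kernel itself runs.

    import tensorflow as tf

    a = tf.ones([2, 3])
    b = tf.ones([4, 5])  # inner dimensions deliberately incompatible with `a`

    # Eager mode: shape inference is skipped, so the error surfaces only when
    # the MatMul kernel executes and rejects the shapes.
    try:
        tf.matmul(a, b)
    except tf.errors.InvalidArgumentError as e:
        print("eager: kernel-level check fired:", type(e).__name__)

    # Graph mode: tracing the same call runs shape inference, which rejects
    # the op before any kernel runs.
    @tf.function
    def bad_matmul():
        return tf.matmul(a, b)

    try:
        bad_matmul()
    except (ValueError, tf.errors.InvalidArgumentError) as e:
        print("graph: caught during tracing:", type(e).__name__)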
  2. ci/official/utilities/rename_and_verify_wheels.sh

    "$python" -m pip install *.whl $TFCI_PYTHON_VERIFY_PIP_INSTALL_ARGS
    if [[ "$TFCI_WHL_IMPORT_TEST_ENABLE" == "1" ]]; then
      "$python" -c 'import tensorflow as tf; t1=tf.constant([1,2,3,4]); t2=tf.constant([5,6,7,8]); print(tf.add(t1,t2).shape)'
      "$python" -c 'import sys; import tensorflow as tf; sys.exit(0 if "keras" in tf.keras.__name__ else 1)'
    fi
    # VERY basic check to ensure the [and-cuda] package variant is installable.
    Shell Script
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 27 21:16:27 GMT 2024
    - 3.4K bytes
    - Viewed (0)
  3. tensorflow/c/c_api_function_test.cc

                                                               TF_DeleteStatus);
    
      TF_Tensor* tensor_shape = Int32Tensor({37, 1});
      TF_Operation* shape = Const(tensor_shape, func_graph.get(), s.get(), "shape");
      TF_Operation* random =
          RandomUniform(shape, TF_FLOAT, func_graph.get(), s.get());
    
      TF_Output outputs[] = {{random, 0}};
      *func = TF_GraphToFunction(func_graph.get(), name,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 20 22:08:54 GMT 2023
    - 63.6K bytes
    - Viewed (6)
  4. tensorflow/c/eager/c_api_test_util.cc

      // Create the variable handle.
      TFE_Op* op = TFE_NewOp(ctx, "VarHandleOp", status);
      if (TF_GetCode(status) != TF_OK) return nullptr;
      TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
      TFE_OpSetAttrShape(op, "shape", {}, 0, status);
      TFE_OpSetAttrString(op, "container", "localhost", 0);
      TFE_OpSetAttrString(op, "shared_name", "", 0);
      if (!device_name.empty()) {
        TFE_OpSetDevice(op, device_name.c_str(), status);
      }
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 23.5K bytes
    - Viewed (2)
  5. tensorflow/c/c_test_util.h

    TF_Operation* Neg(TF_Operation* n, TF_Graph* graph, TF_Status* s,
                      const char* name = "neg");
    
    TF_Operation* LessThan(TF_Output l, TF_Output r, TF_Graph* graph, TF_Status* s);
    
    TF_Operation* RandomUniform(TF_Operation* shape, TF_DataType dtype,
                                TF_Graph* graph, TF_Status* s);
    
    // Split `input` along the first dimension into 3 tensors
    TF_Operation* Split3(TF_Operation* input, TF_Graph* graph, TF_Status* s,
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 09 01:06:53 GMT 2018
    - 6K bytes
    - Viewed (0)
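The `Split3` helper declared in the excerpt above corresponds, at the Python level, to `tf.split` along the first dimension. A minimal sketch (the tensor values are illustrative only):

    import tensorflow as tf

    x = tf.reshape(tf.range(6), [6, 1])
    # Split `x` along the first dimension into 3 tensors, mirroring Split3.
    a, b, c = tf.split(x, num_or_size_splits=3, axis=0)
    print(a.shape, b.shape, c.shape)  # each piece is [2, 1]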
  6. tensorflow/c/eager/c_api_experimental.cc

    }
    
    TFE_MonitoringBuckets* TFE_MonitoringNewExponentialBuckets(double scale,
                                                               double growth_factor,
                                                               int bucket_count) {
      return new TFE_MonitoringBuckets([scale, growth_factor, bucket_count]() {
        return tensorflow::monitoring::Buckets::Exponential(scale, growth_factor,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 11 23:52:39 GMT 2024
    - 35.9K bytes
    - Viewed (3)
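As a rough sketch of what `TFE_MonitoringNewExponentialBuckets` configures, assuming the usual definition of exponential histogram buckets (each boundary is the previous one multiplied by `growth_factor`, starting at `scale`):

    # Illustrative only: boundaries an exponential bucketing scheme with
    # scale=1.0, growth_factor=2.0, bucket_count=5 would use under that assumption.
    scale, growth_factor, bucket_count = 1.0, 2.0, 5
    boundaries = [scale * growth_factor ** i for i in range(bucket_count)]
    print(boundaries)  # [1.0, 2.0, 4.0, 8.0, 16.0]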
  7. tensorflow/c/experimental/gradients/math_grad.cc

      }
    
      Status Compute(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        // TODO(vnvo2409): Add shape broadcasting
        /* Given upstream grad U and a Div op: Z = X/Y, the gradients are:
         *
         *    dX = U / Y
         *    dY = -U*X / Y^2 = (X/Y) * -U / Y = -U*Z / Y
         *
         */
    
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 15.2K bytes
    - Viewed (0)
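The gradient formulas in that comment can be checked numerically with `tf.GradientTape`; a minimal sketch, using the default upstream gradient U = 1:

    import tensorflow as tf

    x = tf.constant(6.0)
    y = tf.constant(3.0)
    with tf.GradientTape() as tape:
        tape.watch([x, y])
        z = x / y                     # Z = X / Y = 2.0
    dx, dy = tape.gradient(z, [x, y])
    print(dx.numpy())  # dX = U / Y      = 1/3  ~ 0.3333
    print(dy.numpy())  # dY = -U * Z / Y = -2/3 ~ -0.6667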
  8. tensorflow/c/c_api_experimental.cc

        ShapeHandle shape_handle = c.output(i);
        TF_ShapeAndType& shape = output_shapes_result->items[i];
        shape.num_dims = c.Rank(shape_handle);
        if (shape.num_dims == InferenceContext::kUnknownRank) {
          shape.dims = nullptr;
          continue;
        }
        shape.dims = new int64_t[shape.num_dims];
        for (size_t j = 0; j < shape.num_dims; ++j) {
          shape.dims[j] = c.Value(c.Dim(shape_handle, j));
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
  9. tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc

      ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
      const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
      const std::vector<int64_t>* shape;
      Status s = handles[0]->Shape(&shape);
      ASSERT_TRUE(s.ok());
      EXPECT_EQ(0, shape->size());
    }
    
    TEST(PARALLEL_DEVICE_LIB, TestCancelOnError) {
      std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
          TF_NewStatus(), TF_DeleteStatus);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 08 23:47:35 GMT 2021
    - 15.3K bytes
    - Viewed (0)
  10. tensorflow/c/c_api_experimental.h

    TF_CAPI_EXPORT extern void TF_DeleteShapeAndTypeListArray(
        TF_ShapeAndTypeList** shape_list_array, int num_items);
    
    // Infer shapes for the given `op`. The arguments mimic the arguments of the
    // `shape_inference::InferenceContext` constructor. Note the following:
    //   - The inputs of the `op` are not used for shape inference. So, it is
    //     OK to not have the inputs properly set in `op`. See `input_tensors`
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 21:07:00 GMT 2023
    - 15.1K bytes
    - Viewed (0)
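The same idea, that shape inference needs the input shapes but not the input values, can be illustrated from Python. This is an analogue of the behaviour the header describes, not the C API itself: tracing a `tf.function` against a `tf.TensorSpec` infers the output shape without ever supplying real tensors.

    import tensorflow as tf

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 3], dtype=tf.float32)])
    def project(x):
        return tf.matmul(x, tf.ones([3, 5]))

    # No concrete input tensors are needed: the output shape is inferred
    # purely from the declared input signature.
    cf = project.get_concrete_function()
    print(cf.outputs[0].shape)  # (None, 5)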