Results 1 - 10 of 51 for backprop (0.22 sec)

  1. tensorflow/cc/framework/while_gradients.cc

      return result;
    }
    
    // The backprop loop counter and main backprop loop run in their own execution
    // frame (conceptually, the main forward loop and forward loop counter run
    // together in a frame, then the backprop loop counter and backprop loop run
    // together in a different frame). This returns the frame name to use for the
    // backprop while loops.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 8.1K bytes
    - Viewed (0)
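
    The frame naming described in this comment belongs to graph-mode while-loop
    gradients; it is exercised whenever a gradient is taken through tf.while_loop.
    A minimal sketch, not from this file (eager for brevity; under tf.function the
    same frame machinery applies):

        import tensorflow as tf

        x = tf.constant(2.0)
        with tf.GradientTape() as tape:
            tape.watch(x)
            # Square the running value three times: y = x ** (2 ** 3) = x ** 8.
            _, y = tf.while_loop(lambda i, v: i < 3,
                                 lambda i, v: (i + 1, v * v),
                                 [tf.constant(0), x])
        print(tape.gradient(y, x))  # dy/dx = 8 * x ** 7 = 1024.0
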
  2. tensorflow/c/experimental/ops/nn_ops.cc

      *loss = temp_outputs[0];
      *backprop = temp_outputs[1];
      return status;
    }
    
    // Op: ReluGrad()
    // Summary: Computes rectified linear gradients for a Relu operation.
    //
    // Description:
    Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                    AbstractTensorHandle* const features,
                    AbstractTensorHandle** backprops, const char* name,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 5.9K bytes
    - Viewed (0)
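
    ReluGrad passes each incoming gradient through where the corresponding Relu
    input was positive and zeroes it elsewhere. A short sketch of the same op via
    its Python raw-op binding:

        import tensorflow as tf

        features = tf.constant([-1.0, 0.0, 2.0])     # inputs the Relu saw
        gradients = tf.constant([10.0, 10.0, 10.0])  # incoming gradients dy

        backprops = tf.raw_ops.ReluGrad(gradients=gradients, features=features)
        # Same as: gradients * tf.cast(features > 0, gradients.dtype)
        print(backprops)  # [ 0.  0. 10.]
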
  3. tensorflow/compiler/mlir/tfr/python/test_utils.py

        # compute with op.
        with backprop.GradientTape() as gt:
          for var_ in vars_:
            gt.watch(var_)
      y = compute_op(**op_kwargs)  # uses op; decomposed by the graph pass.
          grads = gt.gradient(y, vars_)  # uses registered gradient function.
    
        # compute with composition
        with backprop.GradientTape() as gt:
          for var_ in vars_:
            gt.watch(var_)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jun 02 18:32:17 UTC 2023
    - 1.8K bytes
    - Viewed (0)
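
    The test runs two tapes, one over the registered op (so its registered
    gradient function is used) and one over the op's Python composition, then
    compares the gradients. The underlying watch/gradient pattern, as a
    standalone sketch:

        import tensorflow as tf

        x = tf.constant(3.0)
        with tf.GradientTape() as gt:
            gt.watch(x)           # constants are not watched automatically
            y = x * x
        print(gt.gradient(y, x))  # tf.Tensor(6.0, shape=(), dtype=float32)
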
  4. tensorflow/c/experimental/gradients/nn_grad_test.cc

        absl::Span<AbstractTensorHandle*> outputs) {
      AbstractTensorHandle* loss;
      AbstractTensorHandle* backprop;
      TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
          ctx, inputs[0], inputs[1], &loss, &backprop,
          "SparseSoftmaxCrossEntropyWithLogits"));
  // `gradient_checker` only works with models that return a single tensor.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 8.3K bytes
    - Viewed (0)
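
    The op under test returns both the per-example loss and the gradient with
    respect to the logits (the `backprop` output) in one call. A sketch through
    the Python raw-op binding, with assumed shapes:

        import tensorflow as tf

        logits = tf.random.normal([4, 10])            # features: [batch, classes]
        labels = tf.constant([1, 5, 0, 9], tf.int64)

        loss, backprop = tf.raw_ops.SparseSoftmaxCrossEntropyWithLogits(
            features=logits, labels=labels)
        # loss: [4]; backprop: [4, 10] == softmax(logits) - one_hot(labels)
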
  5. tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.h.golden

    //
    Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle* const labels, AbstractTensorHandle** loss, AbstractTensorHandle** backprop, const char* name = nullptr, const char* raw_device_name = nullptr);
    
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 2.9K bytes
    - Viewed (0)
  6. tensorflow/c/experimental/ops/nn_ops.h

        AbstractTensorHandle** backprop, const char* name = nullptr,
        const char* raw_device_name = nullptr);
    
    // Computes rectified linear gradients for a Relu operation.
    Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                    AbstractTensorHandle* const features,
                    AbstractTensorHandle** backprops, const char* name = nullptr,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 2.6K bytes
    - Viewed (0)
  7. tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.cc.golden

    // Summary:
    //
    // Description:
    Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle* const labels, AbstractTensorHandle** loss, AbstractTensorHandle** backprop, const char* name, const char* raw_device_name) {
      AbstractOperationPtr op_ptr(ctx->CreateOperation());
      TF_RETURN_IF_ERROR(op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 6.5K bytes
    - Viewed (0)
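
    The generated wrapper only resets and runs the underlying op, so its
    `backprop` output should agree with what a tape computes for the summed
    loss. A small consistency check (shapes are assumptions):

        import tensorflow as tf

        logits = tf.random.normal([2, 3])
        labels = tf.constant([0, 2], tf.int64)

        with tf.GradientTape() as tape:
            tape.watch(logits)
            loss, backprop = tf.raw_ops.SparseSoftmaxCrossEntropyWithLogits(
                features=logits, labels=labels)
        # tape.gradient sums `loss` before differentiating, so this matches backprop.
        tf.debugging.assert_near(tape.gradient(loss, logits), backprop)
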
  8. tensorflow/c/experimental/ops/array_ops.cc

    //   backprop such that dx = g(dy). In Python,
    //
    //   ```python
    //   with tf.get_default_graph().gradient_override_map(
    //       {'IdentityN': 'OverrideGradientWithG'}):
    //     y, _ = identity_n([f(x), x])
    //
    //   @tf.RegisterGradient('OverrideGradientWithG')
    //   def ApplyG(op, dy, _):
    //     return [None, g(dy)]  # Do not backprop to f(x).
    //   ```
    Status IdentityN(AbstractContext* ctx,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 6.7K bytes
    - Viewed (0)
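
    The embedded example relies on TF1's graph-level gradient_override_map. A
    hedged modern equivalent using tf.custom_gradient, where g is a stand-in for
    the g in the comment:

        import tensorflow as tf

        def g(dy):
            return 0.5 * dy  # hypothetical stand-in for g in the comment above

        @tf.custom_gradient
        def override_with_g(x):
            def grad(dy):
                return g(dy)  # replace the identity gradient with g(dy)
            return tf.identity(x), grad

        x = tf.constant(4.0)
        with tf.GradientTape() as tape:
            tape.watch(x)
            y = override_with_g(x)
        print(tape.gradient(y, x))  # 0.5
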
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

          });
        }
        filter_value = filter.getValue();
        mul_value = multiplier.getValue();
        // In MHLO, Conv filter is in HWIO format, Depthwise conv filter is in HW1O
        // format and backprop input conv filter is in HWOI format.
        // Only fuses multiplier if all dimensions other than the out channel
        // dimension are equal to 1.
        if (!TFL::IsDimensionsDegenerateExceptLastOne(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
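
    The fusion is sound because a multiplier that varies only along the output
    channel commutes with the convolution: conv(x, w) * m == conv(x, w * m). A
    numeric sketch with assumed NHWC input and HWIO filter shapes:

        import tensorflow as tf

        x = tf.random.normal([1, 8, 8, 3])    # NHWC input
        w = tf.random.normal([3, 3, 3, 4])    # HWIO filter, out channels last
        m = tf.random.normal([1, 1, 1, 4])    # degenerate except the last dim

        unfused = tf.nn.conv2d(x, w, strides=1, padding="SAME") * m
        fused = tf.nn.conv2d(x, w * m, strides=1, padding="SAME")
        tf.debugging.assert_near(unfused, fused, atol=1e-4)
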
  10. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_70.mlir

      %arg2: tensor<64xf32>
    ) -> tensor<1x28x28x64xf32> {
    
      // CHECK: "tf.FusedBatchNormGradV3"
      // CHECK-SAME: (%[[X_TRANSPOSE:[0-9]*]], %[[Y_TRANSPOSE:[0-9]*]],
      // CHECK-SAME: data_format = "NCHW"
      %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2
        = "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2)
           {
             data_format = "NHWC",
             epsilon = 1.001 : f32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 21 08:41:18 UTC 2022
    - 8.5K bytes
    - Viewed (0)
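
    The CHECK lines expect the layout optimizer to rewrite this NHWC op to NCHW
    for the GPU and wrap its operands and results in transposes. The two layouts
    are related by fixed permutations, as in this sketch:

        import tensorflow as tf

        x_nhwc = tf.random.normal([1, 28, 28, 64])
        x_nchw = tf.transpose(x_nhwc, [0, 3, 1, 2])      # NHWC -> NCHW
        round_trip = tf.transpose(x_nchw, [0, 2, 3, 1])  # NCHW -> NHWC
        tf.debugging.assert_equal(x_nhwc, round_trip)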