Results 1 - 10 of 35 for Brad (0.06 sec)

  1. src/database/sql/sql_test.go

    		wantErr string
    	}
    	execTests := []execTest{
    		// Okay:
    		{[]any{"Brad", 31}, ""},
    		{[]any{"Brad", int64(31)}, ""},
    		{[]any{"Bob", "32"}, ""},
    		{[]any{7, 9}, ""},
    
    		// Invalid conversions:
    		{[]any{"Brad", int64(0xFFFFFFFF)}, "sql: converting argument $2 type: sql/driver: value 4294967295 overflows int32"},
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 18:42:28 UTC 2024
    - 111.6K bytes
    - Viewed (0)
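
The overflow error in this test comes from converting a 64-bit argument into a 32-bit column in the test's fake driver. A minimal sketch of the kind of bounds check that produces such an error, assuming only that the driver range-checks before narrowing (toInt32 is an illustrative helper, not the actual database/sql internals):

    package main

    import (
        "fmt"
        "math"
    )

    // toInt32 mirrors the range check a driver performs before storing a
    // query argument into an INT32 column; illustrative only.
    func toInt32(v int64) (int32, error) {
        if v < math.MinInt32 || v > math.MaxInt32 {
            return 0, fmt.Errorf("sql/driver: value %d overflows int32", v)
        }
        return int32(v), nil
    }

    func main() {
        if _, err := toInt32(0xFFFFFFFF); err != nil {
            fmt.Println(err) // sql/driver: value 4294967295 overflows int32
        }
    }
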
  2. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // CHECK: [[GRAD_SQUARE:%.*]] = "tf.Mul"([[GRAD]], [[GRAD]]) : (tensor<f32>, tensor<f32>) -> tensor<f32>
        // CHECK: [[NEW_ACC:%.*]] = "tf.AddV2"([[OLD_ACC]], [[GRAD_SQUARE]]) : (tensor<*xf32>, tensor<f32>) -> tensor<*xf32>
        // CHECK: [[LR_MULTIPLY:%.*]] = "tf.Mul"([[LR]], [[GRAD]]) : (tensor<f32>, tensor<f32>) -> tensor<f32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 51.3K bytes
    - Viewed (0)
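
The CHECK lines above trace the decomposition of a fused Adagrad resource update into primitive Mul/AddV2 ops. A scalar sketch of the same arithmetic, assuming the standard Adagrad rule; the final sqrt-and-subtract step is outside the excerpt:

    package main

    import (
        "fmt"
        "math"
    )

    // One scalar Adagrad step, spelled with the same intermediate values
    // the decomposition names: grad_square, new_acc, lr_multiply.
    func adagradStep(variable, acc, grad, lr float64) (newVar, newAcc float64) {
        gradSquare := grad * grad // tf.Mul(grad, grad)
        newAcc = acc + gradSquare // tf.AddV2(old_acc, grad_square)
        lrMultiply := lr * grad   // tf.Mul(lr, grad)
        // The step below is part of the standard rule but not shown above.
        newVar = variable - lrMultiply/math.Sqrt(newAcc)
        return newVar, newAcc
    }

    func main() {
        fmt.Println(adagradStep(1.0, 0.1, 0.5, 0.01))
    }
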
  3. tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.td

         (CreateTFReadVariableOp $src_op, $grad, $ms_resource),
         (TF_AddV2Op:$ms_new
           (TF_MulOp
             (TF_MulOp $grad, $grad),
             (TF_SubOp $one, $rho)
           ),
           (TF_MulOp
              (CreateTFReadVariableOp $src_op, $grad, $ms_resource),
              $rho
           )
         ),
         (TF_AssignVariableOp $ms_resource, $ms_new, (CreateConstBoolAttrFalse)),
         // mg = grad * (one - rho) + mg * rho;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 20.7K bytes
    - Viewed (0)
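
The pattern builds the two moving averages of (centered) RMSProp out of primitive Mul/Sub/AddV2 ops, and the trailing comment gives the mg rule in plain algebra. The same two updates as scalar arithmetic, covering just what the excerpt shows:

    package main

    import "fmt"

    // ms tracks the running mean of squared gradients; mg (centered
    // variant) tracks the running mean of gradients, both decayed by rho.
    func rmspropAverages(ms, mg, grad, rho float64) (msNew, mgNew float64) {
        msNew = grad*grad*(1-rho) + ms*rho // ms_new in the pattern above
        mgNew = grad*(1-rho) + mg*rho      // mg = grad * (one - rho) + mg * rho
        return msNew, mgNew
    }

    func main() {
        fmt.Println(rmspropAverages(0.2, 0.1, 0.5, 0.9))
    }
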
  4. tensorflow/c/experimental/gradients/math_grad.cc

        /* Given upstream grad U and a Sub op A-B, the gradients are:
         *
         *    dA =  U
         *    dB = -U
         *
         */
    
        // Grad for A
        DCHECK(grad_outputs[0]);
        grad_inputs[0] = grad_outputs[0];
        grad_inputs[0]->Ref();
    
        // Grad for B
        // negate the upstream grad
        std::string name = "Neg_Sub_Grad_B";
        TF_RETURN_IF_ERROR(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 15.2K bytes
    - Viewed (0)
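
The comment states the rule the code then implements: for f(A, B) = A - B with upstream gradient U, dA = U and dB = -U. A quick numeric check of that rule via finite differences:

    package main

    import "fmt"

    // subGrad encodes dA = U, dB = -U for f(a, b) = a - b.
    func subGrad(u float64) (da, db float64) { return u, -u }

    func main() {
        f := func(a, b float64) float64 { return a - b }
        const h = 1e-6
        numDA := (f(3+h, 2) - f(3, 2)) / h // ~ +1
        numDB := (f(3, 2+h) - f(3, 2)) / h // ~ -1
        da, db := subGrad(1)
        fmt.Printf("analytic (%g, %g) vs numeric (%.3f, %.3f)\n", da, db, numDA, numDB)
    }
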
  5. tensorflow/c/experimental/gradients/nn_grad.cc

                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        // Grad for Softmax Input
        TF_RETURN_IF_ERROR(BroadcastMul(
            ctx, grad_outputs[0], forward_outputs_[1],
            grad_inputs.subspan(0, 1)));  // upstream_grad * local softmax grad
    
        // Grad for labels is null
        grad_inputs[1] = nullptr;
        return absl::OkStatus();
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
    - Viewed (0)
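
Here the op's second forward output is a precomputed per-example backprop term, and BroadcastMul scales each example's row by the upstream loss gradient; the labels are integer class ids, so their gradient slot is set to null. A per-row sketch of that multiply, assuming one upstream scalar per example:

    package main

    import "fmt"

    // Scale each row of the precomputed backprop matrix by that example's
    // upstream gradient (the broadcast multiply in the excerpt).
    func softmaxInputGrad(upstream []float64, backprop [][]float64) [][]float64 {
        out := make([][]float64, len(backprop))
        for i, row := range backprop {
            out[i] = make([]float64, len(row))
            for j, v := range row {
                out[i][j] = upstream[i] * v
            }
        }
        return out
    }

    func main() {
        // One example; the row stands in for the op's per-class backprop term.
        fmt.Println(softmaxInputGrad([]float64{2}, [][]float64{{0.1, -0.3, 0.2}}))
    }
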
  6. tensorflow/c/eager/tape.h

            return s;
          }
        } else {
          if (!persistent_) {
            trace.backward_function_deleter(trace.backward_function);
          }
          for (Gradient* grad : out_gradients) {
            if (grad != nullptr) {
              vspace.DeleteGradient(grad);
            }
          }
        }
        for (int i = 0, end = in_gradients.size(); i < end; ++i) {
          const int64_t id = trace.input_tensor_id[i];
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 12:40:29 UTC 2024
    - 47.2K bytes
    - Viewed (0)
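
The branch above is the non-persistent cleanup path: once a trace's output gradients have been consumed, the backward function and any unused gradients are freed, so the tape can only be walked once. A toy sketch of that ownership policy, with illustrative names:

    package main

    import "fmt"

    // A non-persistent tape frees per-op backward state as soon as the
    // backward pass has consumed it; a persistent tape keeps it for reuse.
    type tape struct {
        persistent bool
        deleters   []func() // e.g. backward_function_deleter in the excerpt
    }

    func (t *tape) afterBackward() {
        if t.persistent {
            return // state kept; gradients can be computed again
        }
        for _, del := range t.deleters {
            del()
        }
        t.deleters = nil
    }

    func main() {
        t := &tape{deleters: []func(){func() { fmt.Println("freed backward fn") }}}
        t.afterBackward()
    }
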
  7. tensorflow/c/c_api_function.cc

      if (TF_GetCode(status) != TF_OK) return;
      if (!grad) return;
    
      status->status = g->graph.AddFunctionDef(grad->record->fdef(),
                                               grad->record->stack_traces());
      if (TF_GetCode(status) != TF_OK) return;
    
      tensorflow::GradientDef gdef;
      gdef.set_function_name(func->record->fdef().signature().name());
      gdef.set_gradient_func(grad->record->fdef().signature().name());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 13.6K bytes
    - Viewed (0)
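
A GradientDef is just a name-to-name pairing: after the gradient's FunctionDef is copied into the graph, the proto records which function serves as which function's gradient. A toy sketch of that mapping; the field names follow the setters in the excerpt, while "MyFunc" and "MyFuncGrad" are hypothetical:

    package main

    import "fmt"

    // Mirrors the two fields set in the excerpt: function_name (forward)
    // and gradient_func (its registered gradient).
    type gradientDef struct {
        functionName string
        gradientFunc string
    }

    func main() {
        // Hypothetical names for illustration.
        gdef := gradientDef{functionName: "MyFunc", gradientFunc: "MyFuncGrad"}
        fmt.Printf("%s -> %s\n", gdef.functionName, gdef.gradientFunc)
    }
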
  8. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

      func.func @max_pool_grad_valid(%orig_input: tensor<10x24x24x64xf32>, %orig_output: tensor<10x12x12x64xf32>, %grad: tensor<10x12x12x64xf32>) -> tensor<10x24x24x64xf32> {
        // CHECK: %[[ZERO:.*]] = mhlo.constant dense<0.000000e+00> : tensor<f32>
        // CHECK: %[[RESULT:.*]] = "mhlo.select_and_scatter"(%[[INPUT]], %[[GRAD]], %[[ZERO]]) <{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
    - Viewed (0)
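
mhlo.select_and_scatter implements max-pool backprop by routing each upstream value to the position that won the max in its window. A 1-D, non-overlapping-window sketch of that routing, not the mhlo op itself:

    package main

    import "fmt"

    // maxPoolGrad1D scatters each upstream gradient into the argmax
    // position of its pooling window (stride == window for simplicity).
    func maxPoolGrad1D(input, upstream []float64, window int) []float64 {
        grad := make([]float64, len(input))
        for w := 0; w < len(upstream); w++ {
            start := w * window
            argmax := start
            for i := start + 1; i < start+window && i < len(input); i++ {
                if input[i] > input[argmax] {
                    argmax = i
                }
            }
            grad[argmax] += upstream[w] // scatter into the max position
        }
        return grad
    }

    func main() {
        fmt.Println(maxPoolGrad1D([]float64{1, 5, 2, 4}, []float64{10, 20}, 2))
        // [0 10 0 20]
    }
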
  9. tensorflow/cc/framework/gradients.cc

      if (grads_to_keep.empty()) {
        // Nothing propagated back. Return 'NoGradient'.
        *grad = NoGradient();
      } else if (grads_to_keep.size() == 1) {
        // Just one backprop edge.
        *grad = grads_to_keep[0];
      } else {
        // Otherwise, adds backprop-ed gradients.
        // TODO(andydavis) Use a better accumulator here.
        *grad = ops::AddN(scope_, grads_to_keep);
      }
    
      return absl::OkStatus();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 22K bytes
    - Viewed (0)
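
When a node has several consumers, each contributes a backprop edge and the gradients are summed elementwise, which is what the AddN call does; zero edges means NoGradient, and a single edge is passed through. A minimal sketch of the accumulation:

    package main

    import "fmt"

    // addN sums gradients arriving over multiple backprop edges elementwise.
    func addN(gradsToKeep ...[]float64) []float64 {
        if len(gradsToKeep) == 0 {
            return nil // the NoGradient case: nothing propagated back
        }
        sum := make([]float64, len(gradsToKeep[0]))
        for _, g := range gradsToKeep {
            for i, v := range g {
                sum[i] += v
            }
        }
        return sum
    }

    func main() {
        fmt.Println(addN([]float64{1, 2}, []float64{3, 4})) // [4 6]
    }
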
  10. src/cmd/compile/internal/ssa/_gen/PPC64.rules

    (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRD (MOVBZreg x) y)
    (Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
    (Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
    (Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD (MOVHreg x) y)
    (Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAD (MOVBreg x) y)
    
    // Unbounded shifts. Go shifts saturate to 0 or -1 when shifting beyond the number of
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 53.2K bytes
    - Viewed (0)
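
The truncated comment refers to Go's shift semantics: unlike C, a Go shift by at least the operand width is well defined, yielding 0 for logical shifts and -1 for arithmetic shifts of negative values, which is why the unbounded-shift rules need extra compare-and-mask work. Demonstrated directly:

    package main

    import "fmt"

    func main() {
        var u uint8 = 0xFF
        var s int8 = -1
        fmt.Println(u >> 100) // 0: logical shift past 8 bits saturates to 0
        fmt.Println(s >> 100) // -1: the sign bit fills the result
        fmt.Println(u << 100) // 0: left shifts saturate to 0 as well
    }
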