Results 11 - 20 of 79 for grad (0.12 sec)
- tensorflow/compiler/mlir/tfr/examples/pad/ops_defs.py
  input_ = tf.raw_ops.Concat(
      concat_dim=i, values=[left_padding, input_, right_padding])
  return input_


@tf.RegisterGradient('NewMirrorPad')
def _mirror_pad_grad(op, grad):
  mode = op.get_attr('mode')
  return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None]


@Composite(
    'NewMirrorPadGrad',
    inputs=['input_: T', 'paddings: Tpaddings'],
    attrs=['mode: {"REFLECT", "SYMMETRIC"}'],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 01 05:00:29 UTC 2021 - 5.6K bytes - Viewed (0)
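The excerpt above uses TensorFlow's gradient-registration decorator. For orientation, a minimal sketch of that pattern in isolation; the op type "MyIdentity" is hypothetical and used only to show how tf.RegisterGradient binds an op type to a gradient function:

  import tensorflow as tf

  # Sketch only: 'MyIdentity' is a hypothetical op type for illustration.
  # tf.RegisterGradient maps a graph op type to a function that receives the
  # forward op and the incoming gradient, and returns one gradient per input.
  @tf.RegisterGradient("MyIdentity")
  def _my_identity_grad(op, grad):
    del op  # unused for an identity-like op
    return [grad]  # pass the incoming gradient straight through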
- src/math/asinh_s390x.s
TEXT ·asinhAsm(SB), NOSPLIT, $0-16
	FMOVD x+0(FP), F0
	MOVD $·asinhrodataL18<>+0(SB), R9
	LGDR F0, R12
	WORD $0xC0293FDF //iilf %r2,1071644671
	BYTE $0xFF
	BYTE $0xFF
	SRAD $32, R12
	WORD $0xB917001C //llgtr %r1,%r12
	MOVW R1, R6
	MOVW R2, R7
	CMPBLE R6, R7, L2
	WORD $0xC0295FEF //iilf %r2,1609564159
	BYTE $0xFF
	BYTE $0xFF
	MOVW R2, R7
	CMPBLE R6, R7, L14
L3:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 15:34:41 UTC 2019 - 5.7K bytes - Viewed (0)
- tensorflow/compiler/jit/ops/xla_ops_grad.py
# ==============================================================================
from tensorflow.python.framework import ops


@ops.RegisterGradient("XlaClusterOutput")
def _XlaClusterOutputGrad(_, grad):
  del grad  # unused
  raise RuntimeError("Gradient computation of graph in xla.compile() is "
                     "prohibited because it can cause performance degradation."
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.1K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc
LogicalResult HandleTensorArrayGradV3Op(
    TF::TensorArrayGradV3Op grad,
    llvm::SmallDenseMap<Value, TensorArrayStats>* stats) {
  auto local_var = grad.getHandle();
  OpBuilder builder(grad);
  Value grad_var;
  auto sit = stats->find(local_var);
  if (sit == stats->end()) return grad.emitOpError("unknown tensor array");
  auto emplace_res =
      sit->getSecond().grads.try_emplace(grad.getSource().str(), Value());
  if (!emplace_res.second) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 02 20:41:19 UTC 2023 - 40.2K bytes - Viewed (0)
- src/math/acosh_s390x.s
TEXT ·acoshAsm(SB), NOSPLIT, $0-16
	FMOVD x+0(FP), F0
	MOVD $·acoshrodataL11<>+0(SB), R9
	LGDR F0, R1
	WORD $0xC0295FEF //iilf %r2,1609564159
	BYTE $0xFF
	BYTE $0xFF
	SRAD $32, R1
	CMPW R1, R2
	BGT L2
	WORD $0xC0293FEF //iilf %r2,1072693247
	BYTE $0xFF
	BYTE $0xFF
	CMPW R1, R2
	BGT L10
L3:
	WFCEDBS V0, V0, V2
	BVS L1
	FMOVD 112(R9), F0
L1:
	FMOVD F0, ret+8(FP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 15:34:41 UTC 2019 - 4.3K bytes - Viewed (0)
- tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
%const = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
%elem = "tf._SomeOp"() : () -> tensor<3xf32>
%grad:2 = "tf.TensorArrayGradV3"(%ta#0, %ta#1) {source = "a"}
    : (tensor<!tf_type.resource>, tensor<f32>) -> (tensor<!tf_type.resource>, tensor<f32>)
%gwrite = "tf.TensorArrayWriteV3"(%grad#0, %const, %elem, %grad#1)
    : (tensor<!tf_type.resource>, tensor<i32>, tensor<3xf32>, tensor<f32>) -> tensor<f32>
func.return
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0)
- tensorflow/cc/gradients/linalg_grad.cc
tensorflow::Output y = op.input(1);
if (DataTypeIsComplex(grad.type())) {
  x = Conj(scope, x);
  y = Conj(scope, y);
}
const auto x_shape = Shape(scope, x);
const auto y_shape = Shape(scope, y);
Output grad_x =
    EinsumGradWrt(scope, grad, y, x_shape, x_subs, y_subs, output_subs);
Output grad_y =
    EinsumGradWrt(scope, grad, x, y_shape, y_subs, x_subs, output_subs);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 20.4K bytes - Viewed (0)
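A hedged Python-level sketch of the EinsumGradWrt idea in the C++ above: the gradient of an einsum with respect to one operand is itself an einsum of the incoming gradient with the other operand, with subscripts swapped (the conjugation branch is a no-op for the real-valued inputs used here). Shapes and values are illustrative only:

  import tensorflow as tf

  x = tf.random.normal([2, 3])
  y = tf.random.normal([3, 4])
  with tf.GradientTape() as tape:
    tape.watch([x, y])
    out = tf.einsum('ij,jk->ik', x, y)
  grad = tf.ones_like(out)  # incoming gradient, d(loss)/d(out)
  gx, gy = tape.gradient(out, [x, y], output_gradients=grad)

  # The same gradients written directly as einsums with swapped subscripts,
  # mirroring what EinsumGradWrt constructs:
  gx_manual = tf.einsum('ik,jk->ij', grad, y)  # equals gx
  gy_manual = tf.einsum('ik,ij->jk', grad, x)  # equals gy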
- tensorflow/cc/gradients/data_flow_grad.cc
    index = Cast(scope, index, DT_INT32);
  }
  // Gather the index specified locations in the gradient and
  // propagate it as the gradient for the i'th data item.
  // i = 0: gather(grad, 2) = [g_5, g_6]
  // i = 1: gather(grad, [1, 0]) = [[g_3, g_4], [g_1, g_2]]
  grad_outputs->push_back(Gather(scope, grad_inputs[0], index));
  }
  return scope.status();
}

REGISTER_GRADIENT_OP("DynamicStitch", DynamicStitchGrad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jul 24 13:40:35 UTC 2021 - 5.8K bytes - Viewed (0)
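The worked example in the comment above translates directly into runnable TensorFlow. A small sketch, with values chosen to mirror the g_1..g_6 layout; the gradient for each data input is a gather of the incoming gradient at that input's indices:

  import tensorflow as tf

  indices = [tf.constant(2), tf.constant([1, 0])]
  data = [tf.constant([5.0, 6.0]),                  # scattered to output row 2
          tf.constant([[3.0, 4.0], [1.0, 2.0]])]    # scattered to rows 1 and 0
  with tf.GradientTape() as tape:
    tape.watch(data)
    stitched = tf.dynamic_stitch(indices, data)     # [[1, 2], [3, 4], [5, 6]]
    loss = tf.reduce_sum(stitched)
  g0, g1 = tape.gradient(loss, data)
  # With grad = ones_like(stitched): g0 == gather(grad, 2) and
  # g1 == gather(grad, [1, 0]), exactly as the comment describes.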
- tensorflow/c/eager/tape.h
      return s;
    }
  } else {
    if (!persistent_) {
      trace.backward_function_deleter(trace.backward_function);
    }
    for (Gradient* grad : out_gradients) {
      if (grad != nullptr) {
        vspace.DeleteGradient(grad);
      }
    }
  }
  for (int i = 0, end = in_gradients.size(); i < end; ++i) {
    const int64_t id = trace.input_tensor_id[i];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 02 12:40:29 UTC 2024 - 47.2K bytes - Viewed (0)
- tensorflow/cc/gradients/functional_grad.cc
  }
  func_inputs.insert(std::end(func_inputs), std::begin(grad_inputs),
                     std::end(grad_inputs));
  auto grad = SymbolicGradient(scope, func_inputs, input_dtypes, f);
  for (int i = 0; i < num_inputs; i++) {
    grad_outputs->push_back(grad[i]);
  }
  return scope.status();
}

REGISTER_GRADIENT_OP("PartitionedCall", PartitionedCallGrad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 15 20:09:06 UTC 2021 - 2.1K bytes - Viewed (0)
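For context, PartitionedCall is the op that tf.function calls lower to, so the behaviour this C++ registration implements for the C++ gradients API is the same user-visible one sketched below in Python (the eager tape uses its own machinery, so this is an analogy, not the same code path):

  import tensorflow as tf

  # Hedged sketch: a tf.function call lowers to a PartitionedCall op, whose
  # gradient is a SymbolicGradient of the wrapped function.
  @tf.function
  def f(a, b):
    return a * b + tf.sin(a)

  x = tf.constant(1.5)
  y = tf.constant(2.0)
  with tf.GradientTape() as tape:
    tape.watch([x, y])
    out = f(x, y)
  dx, dy = tape.gradient(out, [x, y])  # dx = y + cos(x), dy = x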