- Sort Score
- Results per page: 10
- Languages All
Results 121 - 130 of 438 for mul (0.06 sec)
-
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/if_op.mlir
%0 = tfl.add %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<*xf32> func.return %0 : tensor<*xf32> } func.func @cond_false(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> { %0 = tfl.mul %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<*xf32> func.return %0 : tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 1.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/compute-cost.mlir
%0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32> %1 = "tfl.mul"(%0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32> func.return %1 : tensor<10x10x10xf32> } // ----- // CHECK: tac.cost = 0x4B673001
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 24 05:29:10 UTC 2022 - 4.1K bytes - Viewed (0) -
src/crypto/internal/bigmod/nat_test.go
b, _ := new(big.Int).SetString("180692823610368451951102211649591374573781973061758082626801", 10) n := new(big.Int).Mul(a, b) N, _ := NewModulusFromBig(n) A := NewNat().setBig(a).ExpandFor(N) B := NewNat().setBig(b).ExpandFor(N) if A.Mul(B, N).IsZero() != 1 { t.Error("a * b mod (a * b) != 0") } i := new(big.Int).ModInverse(a, b) N, _ = NewModulusFromBig(b)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jan 12 00:56:20 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir
%1 = "tfl.mul"(%0, %cst) {fused_activation_function = "NONE"} : (tensor<256x8x7x3xf32>, tensor<3xf32>) -> tensor<256x8x7x3xf32> func.return %1 : tensor<256x8x7x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 1.4K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad.cc
AbstractTensorHandle* expand_dims_outputs; TF_RETURN_IF_ERROR( ops::ExpandDims(ctx, vec, dim.get(), &expand_dims_outputs, "ExpandDims")); TF_RETURN_IF_ERROR( ops::Mul(ctx, expand_dims_outputs, mat, &outputs[0], "Mul")); expand_dims_outputs->Unref(); return absl::OkStatus(); } class SparseSoftmaxCrossEntropyWithLogitsGradientFunction : public GradientFunction { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0) -
src/crypto/internal/edwards25519/scalarmult_test.go
var p Point p.ScalarMult(dalekScalar, B) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("Scalar mul does not match dalek") } checkOnCurve(t, &p) } func TestBaseMultVsDalek(t *testing.T) { var p Point p.ScalarBaseMult(dalekScalar) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("Scalar mul does not match dalek") } checkOnCurve(t, &p) } func TestVarTimeDoubleBaseMultVsDalek(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 17:26:17 UTC 2023 - 4.9K bytes - Viewed (0) -
src/crypto/internal/nistec/fiat/p384.go
return e } // Sub sets e = t1 - t2, and returns e. func (e *P384Element) Sub(t1, t2 *P384Element) *P384Element { p384Sub(&e.x, &t1.x, &t2.x) return e } // Mul sets e = t1 * t2, and returns e. func (e *P384Element) Mul(t1, t2 *P384Element) *P384Element { p384Mul(&e.x, &t1.x, &t2.x) return e } // Square sets e = t * t, and returns e. func (e *P384Element) Square(t *P384Element) *P384Element {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Aug 12 00:04:29 UTC 2022 - 3.6K bytes - Viewed (0) -
src/crypto/internal/nistec/fiat/p521.go
return e } // Sub sets e = t1 - t2, and returns e. func (e *P521Element) Sub(t1, t2 *P521Element) *P521Element { p521Sub(&e.x, &t1.x, &t2.x) return e } // Mul sets e = t1 * t2, and returns e. func (e *P521Element) Mul(t1, t2 *P521Element) *P521Element { p521Mul(&e.x, &t1.x, &t2.x) return e } // Square sets e = t * t, and returns e. func (e *P521Element) Square(t *P521Element) *P521Element {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Aug 12 00:04:29 UTC 2022 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/lower_variable_ops_to_ml_program.mlir
// CHECK: "tf.Mul"(%[[v1]], %[[v2]]) %0 = "tf.VarHandleOp"() {container = "", shared_name = "v"} : () -> tensor<!tf_type.resource<tensor<10xf32>>> %1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<10xf32>>>) -> tensor<10xf32> %2 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<10xf32>>>) -> tensor<10xf32> %3 = "tf.Mul"(%1, %2) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Sep 19 19:00:41 UTC 2022 - 6K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad.cc
std::vector<Output>* grad_outputs) { auto softmax = Exp(scope, op.output(0)); auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true)); auto mul = Mul(scope, sum, softmax); auto dx = Sub(scope, grad_inputs[0], mul); grad_outputs->push_back(dx); return scope.status(); } REGISTER_GRADIENT_OP("LogSoftmax", LogSoftmaxGrad); Status ReluGradHelper(const Scope& scope, const Operation& op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0)