- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 33 for BatchMatMulV2 (0.21 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
for (int64_t i = 0; i < input.size(); ++i) { output[permutation[i]] = input[i]; } return output; } // Computes the transpositions required to convert dnums to one supported by // tf.BatchMatmulV2 and returns the new set of dimension numbers with them. // Transposed LHS shape will be B0,...,Bn,L0,...,Ln,C0,...,Cn and, // transposed RHS shape will be B0,...,Bn,C0,...,Cn,R0,...,Rn respectively.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// operands are properly supported in declarative rewrite rule specification. DECL_CONVERT_OP(Assert); DECL_CONVERT_OP(ConcatV2); DECL_CONVERT_OP(BatchMatMul); DECL_CONVERT_OP(BatchMatMulV2); DECL_CONVERT_OP(BatchMatMulV3); DECL_CONVERT_OP(MatMul); DECL_CONVERT_OP(MatrixDiagV2); DECL_CONVERT_OP(MatrixDiagV3); DECL_CONVERT_OP(Pack); DECL_CONVERT_OP(Split); DECL_CONVERT_OP(SplitV);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
(*grad_outputs)[1] = Reshape(scope, ReduceSum(scope, (*grad_outputs)[1], reduce.r1), sy); return scope.status(); } REGISTER_GRADIENT_OP("BatchMatMulV2", BatchMatMulV2Grad); REGISTER_GRADIENT_OP("BatchMatMulV3", BatchMatMulV2Grad); Status CumsumGrad(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
} func.func @matmul_batchv2(%arg0: tensor<2x10x15xf32>, %arg1: tensor<15x17xf32>) -> tensor<2x10x17xf32> { %0 = "tf.BatchMatMulV2"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", device = "/device:CPU:0", name = "MatMul", adj_x = false, adj_y = false} : (tensor<2x10x15xf32>, tensor<15x17xf32>) -> tensor<2x10x17xf32> func.return %0 : tensor<2x10x17xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
func.return %0: tensor<2x3xf32> } // CHECK-LABEL: testBatchMatMulToV2 func.func @testBatchMatMulToV2(%arg0: tensor<2x3x5xf32>, %arg1: tensor<2x5x7xf32>) -> tensor<2x3x7xf32> { // CHECK: "tf.BatchMatMulV2"(%arg0, %arg1) <{adj_x = false, adj_y = false, grad_x = false, grad_y = false}> {device = "/job:localhost/replica:0/task:0/device:GPU:0"}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"AssignVariableOp", "AssignVariableXlaConcatND", "AvgPool", "AvgPool3D", "AvgPool3DGrad", "AvgPoolGrad", "BatchMatMul", "BatchMatMulV2", "BatchMatMulV3", "BatchToSpace", "BatchToSpaceND", "BesselI0e", "BesselI1e", "Betainc", "BiasAddV1", "Bincount", "Bucketize",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
llvm::ArrayRef<int64_t> y_batches = y_shape.drop_back(2); // Check compatibility of batch dimensions if both input shapes are known. // BatchMatMul should have exactly the same batch dimensions and // BatchMatMulV2 should have broadcastable batch dimensions. // // The last two dimensions are non-batch dimensions that don't need to // participate in batch dimension compatibility check. if (std::is_same<OpT, BatchMatMulOp>()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED: self.assertFalse(self._contains_op(output_graphdef, 'XlaDotV2')) self.assertTrue(self._contains_op(output_graphdef, 'BatchMatMulV2')) else: self.assertFalse(self._contains_op(output_graphdef, 'XlaDotV2')) self.assertTrue(self._contains_op(output_graphdef, 'Einsum')) @parameterized.named_parameters(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
let summary = "Batch Matrix Multiply Operator"; let description = [{ Performs a batched matrix multiplication on the inputs. Follows the conventions of TensorFlow BatchMatMulV2, with support for unknown dimensions in the batch dimensions and broadcasting. Inputs: `inputs[0]`: required: input LHS `inputs[1]`: required: input RHS
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0)