- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 229 for transposes (0.11 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
// Try to reuse result transposes. TransposeOp transpose = ReuseExistingTranspose( &operand, permutation, op, permutation_op, &transpose_ops); // If no transpose is available for reuse, create a new one. if (!transpose) transpose = builder.create<TransposeOp>(loc, operand.get(), permutation_op); operand.set(transpose); } // Remove unused transpose operations.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
return false; } return true; } // In some cases, transposes may commute with elementwise operations. In order // to make as many transposes redundant as possible, we can "push" transposes // back so that they fuse later on. These patterns handle 2 such cases in // a conservative fashion; on-net it will never add to the number of transposes // in the graph. // ewise(tpose(x), tpose(y)) -> tpose(ewise(x, y))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_patterns.td
"== mlir::BoolAttr::get($_builder.getContext(), true)">>; def IsNhwcLayoutOp: Constraint<Neg<IsNchwLayoutOp.predicate>>; // TODO(b/343278954): Move the creation of transposes to a separate prepare pass // to avoid creating multiple pattern-rewrite rules for the same composite op. def LegalizeTorchUpsampleBlinear2dComposite: Pat< (MHLO_CompositeOp:$old_val (variadic $input),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/fold_constant_transpose.cc
// `TransposeOp`'s permutation attribute. const DenseElementsTransposer transposer(original_shape, op.getPermutation()); SmallVector<float> transposed_values = transposer.TransposeValues(original_values); // Create a new constant op with the transposed values. const Location combined_loc =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc
tf_executor::CreateTFExecutorIslandCoarseningPass()); pm.addPass(CreateTFShapeInferencePass()); // Assign optimal data layout to layout sensitive operations and delete // redundant transposes from the IR. LayoutOptimizationPipelineOptions layout_optimization_options; CreateLayoutOptimizationPipeline(pm.nest<func::FuncOp>(), layout_optimization_options);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 09:56:53 UTC 2024 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir
// CHECK: %[[TRANSPOSE_1:.*]] = "tf.Transpose"(%[[EXPAND_DIMS]], %[[CST_0]]) : (tensor<8x64x1xf32>, tensor<3xi32>) -> tensor<1x8x64xf32> // CHECK: %[[TRANSPOSE_2:.*]] = "tf.Transpose"(%arg1, %[[CST_0]]) : (tensor<8x64x64xf32>, tensor<3xi32>) -> tensor<64x8x64xf32> // CHECK: %[[ADD:.*]] = "tf.AddV2"(%[[TRANSPOSE_1]], %[[TRANSPOSE_2]]) {device = ""} : (tensor<1x8x64xf32>, tensor<64x8x64xf32>) -> tensor<64x8x64xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir
// CHECK: %[[RES_TRANSPOSE_0:[0-9]*]] = "tf.Transpose"(%[[ADD]], %[[RES_PERM]]) // CHECK: %[[RES_TRANSPOSE_1:[0-9]*]] = "tf.Transpose"(%[[RES_TRANSPOSE_0]], %[[RES_PERM]]) // CHECK: return %[[RES_TRANSPOSE_1]] %0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32> %1 = "tf.Transpose"(%arg0, %0) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir
func.return %1 : tensor<4x7xcomplex<f64>> } // BatchMatMulV2 has native as well as fallback lowering patterns available. // The fallback pattern uses dot_general without broadcast on operands and then // transposes the output which is faster. However, the fallback pattern doesn't // support dynamic shaped operands like the native lowering. Verify that // fallback lowering is preferred for static shaped operands when available.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize_layout.cc
} static Value CreateTranspose(OpBuilder& builder, Value source, ArrayRef<int64_t> perm) { return builder.create<stablehlo::TransposeOp>(source.getLoc(), source, perm) ->getResult(0); } // Transform pad(transpose(x)) to transpose(pad(x)) struct TransposeCommuteWithPad : public OpRewritePattern<stablehlo::PadOp> { using OpRewritePattern<stablehlo::PadOp>::OpRewritePattern;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h
// quantized typed tensors and converts them to equivalent ops in the TFLite // dialect. std::unique_ptr<OperationPass<func::FuncOp>> CreateUniformQuantizedStableHloToTflPass(); // Create a pass that commute transposes through specific ops std::unique_ptr<OperationPass<ModuleOp>> CreateTransposeCommuteOpsPass(); // Create a pass that legalizes MHLO to TF dialect. std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeHloToTfPass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 3.2K bytes - Viewed (0)