- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 254 for mhlo (2.2 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
lhs_flattend = rewriter.create<mhlo::ReshapeOp>( loc, RankedTensorType::get(lhs_flattened_shape, lhs_type.getElementType()), lhs_transposed.getResult()); } else { auto lhs_flattend_shape_op = BuildDotOperandFlattenedShapeOp( lhs, lhs_dot_dimensions_info, builder, /*is_lhs=*/true); lhs_flattend = rewriter.create<mhlo::DynamicReshapeOp>( loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir
tensor<64x1001xf32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\0...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 37.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
// Converts a mhlo.reduce op with a mhlo binary operation into a tensorflow // reduction operation. If the initial value can be ignored, then convert it // into a single TfReduceOp. Otherwise, convert it into a TfReduceOp followed by // a TfBinaryOp. // For example: // 1) A mhlo::ReduceOp on value `x` with a mhlo::AndOp and a constant initial // value `true` is converted to a TF::Any on value `x`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc
#include "xla/client/sharding_builder.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/primitive_util.h" #include "xla/side_effect_util.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { using func::FuncOp; namespace mhlo { namespace { constexpr char kShardingAttr[] = "mhlo.sharding"; constexpr char kFrontendAttributesAttr[] = "mhlo.frontend_attributes";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 40.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h
rewriter.replaceOpWithNewOp<mhlo::TransposeOp>( scatter_op, scatter_op.getResult(0).getType(), tf_scatter_op, inverse_permutation); return success(); } } }; using ConvertScatterAddOp = ConvertScatterOp<mhlo::AddOp, TF::TensorScatterAddOp>; using ConvertScatterMaxOp = ConvertScatterOp<mhlo::MaxOp, TF::TensorScatterMaxOp>; using ConvertScatterMinOp =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
// of dynamic shapes. Therefore we lower chlo ops after optimization. pm.addNestedPass<func::FuncOp>(CreateOptimizeIntGraphPass()); pm.addNestedPass<func::FuncOp>(mhlo::createChloLegalizeToHloPass()); pm.addNestedPass<func::FuncOp>(createCanonicalizerPass()); pm.addPass(createSymbolDCEPass()); // MHLO -> StableHLO legalization.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf_mlir_opt_main.cc
mlir::tf_saved_model::registerTensorFlowSavedModelPasses(); mlir::TFL::registerTensorFlowLitePasses(); mlir::mhlo::registerAllMhloPasses(); // These are in compiler/mlir/tf2xla and not part of the above MHLO passes. mlir::mhlo::registerLegalizeTfPasses(); mlir::mhlo::registerTfXlaPasses(); mlir::quant::stablehlo::registerBridgePasses(); tensorflow::tf2xla::internal::registerTFXLABridgeClusteringPasses();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:16:49 UTC 2024 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/unfold_splat_constant_pass.cc
#include "mlir/Support/LLVM.h" // from @llvm-project #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace odml { namespace { #define DEBUG_TYPE "unfold-splat-constant-pass" #define GEN_PASS_DEF_UNFOLDSPLATCONSTANTPASS #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h.inc" // Undo the MHLO::BroadcastInDimOp folding pattern on splat tensor.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace odml { // Convert mhlo.dot to mhlo.dot_general. LogicalResult ConvertDotToDotGeneral(mhlo::DotOp op, PatternRewriter &rewriter) { auto lhs_type = mlir::cast<ShapedType>(op.getLhs().getType());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/transforms.cc
// on StableHLO, treating StableHLO purely as an input format, and do all // optimizations via MHLO passes that can be shared with the OpenXLA compiler. // Therefore, this function inserts a StableHLO <=> MHLO roundtrip to make // this happen. // StableHLO -> MHLO legalization. pm.addPass(mhlo::createStablehloLegalizeToHloPass()); AddMhloOptimizationPasses(pm, /*enable_stablehlo_quantizer=*/false);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 04:34:23 UTC 2024 - 5.8K bytes - Viewed (0)