- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 285 for mhlo (4.45 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tools/stablehlo_quant_opt.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" #include "xla/mlir_hlo/mhlo/IR/register.h" #include "xla/mlir_hlo/mhlo/transforms/passes.h" #include "tensorflow/core/ir/types/dialect.h" int main(int argc, char** argv) { tensorflow::InitMlir y(&argc, &argv); mlir::registerAllPasses();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 07:37:34 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
} }; // Converts TensorFlow DiagPartOp to HLO ops using reduction on masked matrix. // For a Rank-2 input, it creates the following ops: // %1 = "mhlo.iota"() {iota_dimension = 0 : i64} // %2 = "mhlo.iota"() {iota_dimension = 1 : i64} // %3 = "mhlo.compare"(%1, %2) {comparison_direction = "EQ"} // %4 = mhlo.constant dense<0.000000e+00> : tensor<f32> // %5 = "mhlo.broadcast"(%4) // %6 = "mhlo.select"(%3, %input, %5)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
lhs_flattend = rewriter.create<mhlo::ReshapeOp>( loc, RankedTensorType::get(lhs_flattened_shape, lhs_type.getElementType()), lhs_transposed.getResult()); } else { auto lhs_flattend_shape_op = BuildDotOperandFlattenedShapeOp( lhs, lhs_dot_dimensions_info, builder, /*is_lhs=*/true); lhs_flattend = rewriter.create<mhlo::DynamicReshapeOp>( loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir
tensor<64x1001xf32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas = true, mhlo.sharding = "\08\0...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 37.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
// Converts a mhlo.reduce op with a mhlo binary operation into a tensorflow // reduction operation. If the initial value can be ignored, then convert it // into a single TfReduceOp. Otherwise, convert it into a TfReduceOp followed by // a TfBinaryOp. // For example: // 1) A mhlo::ReduceOp on value `x` with a mhlo::AndOp and a constant initial // value `true` is converted to a TF::Any on value `x`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc
#include "xla/client/sharding_builder.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/primitive_util.h" #include "xla/side_effect_util.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { using func::FuncOp; namespace mhlo { namespace { constexpr char kShardingAttr[] = "mhlo.sharding"; constexpr char kFrontendAttributesAttr[] = "mhlo.frontend_attributes";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 40.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h
rewriter.replaceOpWithNewOp<mhlo::TransposeOp>( scatter_op, scatter_op.getResult(0).getType(), tf_scatter_op, inverse_permutation); return success(); } } }; using ConvertScatterAddOp = ConvertScatterOp<mhlo::AddOp, TF::TensorScatterAddOp>; using ConvertScatterMaxOp = ConvertScatterOp<mhlo::MaxOp, TF::TensorScatterMaxOp>; using ConvertScatterMinOp =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
// of dynamic shapes. Therefore we lower chlo ops after optimization. pm.addNestedPass<func::FuncOp>(CreateOptimizeIntGraphPass()); pm.addNestedPass<func::FuncOp>(mhlo::createChloLegalizeToHloPass()); pm.addNestedPass<func::FuncOp>(createCanonicalizerPass()); pm.addPass(createSymbolDCEPass()); // MHLO -> StableHLO legalization.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf_mlir_opt_main.cc
mlir::tf_saved_model::registerTensorFlowSavedModelPasses(); mlir::TFL::registerTensorFlowLitePasses(); mlir::mhlo::registerAllMhloPasses(); // These are in compiler/mlir/tf2xla and not part of the above MHLO passes. mlir::mhlo::registerLegalizeTfPasses(); mlir::mhlo::registerTfXlaPasses(); mlir::quant::stablehlo::registerBridgePasses(); tensorflow::tf2xla::internal::registerTFXLABridgeClusteringPasses();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:16:49 UTC 2024 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/unfold_splat_constant_pass.cc
#include "mlir/Support/LLVM.h" // from @llvm-project #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace odml { namespace { #define DEBUG_TYPE "unfold-splat-constant-pass" #define GEN_PASS_DEF_UNFOLDSPLATCONSTANTPASS #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h.inc" // Undo the MHLO::BroadcastInDimOp folding pattern on splat tensor.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.8K bytes - Viewed (0)