Results 11 - 20 of 288 for mhlo (0.23 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo_custom_call.mlir

    func.func @mhlo_custom_call_test__dont_legalize_dict_backend_config(%arg0: tensor<1x4xf32>) -> tensor<1x8xf32> {
      %0 = mhlo.custom_call @custom_call.my_custom_op(%arg0) {
        api_version = 4 : i32,
        backend_config = {foo = "bar"}
      } : (tensor<1x4xf32>) -> (tensor<1x8xf32>)
      func.return %0 : tensor<1x8xf32>
    
      //       CHECK: %0 = mhlo.custom_call @custom_call.my_custom_op(%arg0) {
      //  CHECK-SAME:   api_version = 4 : i32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    func.func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
      %0 = mhlo.constant dense<5.000000e-01> : tensor<f32>
      %1 = mhlo.constant dense<2> : tensor<1xi64>
      %2 = mhlo.constant dense<5.000000e-01> : tensor<2xf32>
      %3 = mhlo.multiply %arg0, %2 : tensor<2xf32>
      %4 = "mhlo.tanh"(%3) : (tensor<2xf32>) -> tensor<2xf32>
      %5 = mhlo.multiply %4, %2 : tensor<2xf32>
      %6 = mhlo.add %5, %2 : tensor<2xf32>
      func.return %6 : tensor<2xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/transforms.h

    // like tf.ResizeBilinear or tf.ResizeNearestNeighbor to mhlo.custom_call ops.
    void AddTFToStablehloPasses(OpPassManager& pm, bool skip_resize,
                                bool smuggle_disallowed_ops);
    
    // This function is a common entry point for all graph optimizations that are
    // not specific to any hardware. It legalizes SHLO->MHLO, does MHLO->MHLO
    // optimizations by calling `AddMhloOptimizationPasses` internally, and
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 01:08:27 UTC 2024
    - 1.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-BatchMatMulV2.mlir

    // CHECK:           [[LHSRE:%.*]] = mhlo.real [[LHS]]
    // CHECK:           [[LHSIM:%.*]] = mhlo.imag [[LHS]]
    // CHECK:           [[LHSIMNEG:%.*]] = mhlo.negate [[LHSIM]]
    // CHECK:           [[LHSCONJ:%.*]] = mhlo.complex [[LHSRE]], [[LHSIMNEG]]
    // CHECK:           [[RHSRE:%.*]] = mhlo.real [[RHS]]
    // CHECK:           [[RHSIM:%.*]] = mhlo.imag [[RHS]]
    // CHECK:           [[RHSIMNEG:%.*]] = mhlo.negate [[RHSIM]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

    #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
    #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
    
    namespace mlir {
    namespace odml {
    
    class FuseMhloMulAndConvolutionPattern : public OpRewritePattern<mhlo::MulOp> {
     public:
      using OpRewritePattern<mhlo::MulOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(mhlo::MulOp mul_op,
                                    PatternRewriter &rewriter) const override {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/unfold_splat_constant_pass.mlir

    // CHECK-LABEL: @unfold_splat_constant_float
    func.func @unfold_splat_constant_float() -> tensor<1x750xf32> {
      %cst = mhlo.constant dense<7.680000e+02> : tensor<1x750xf32>
      func.return %cst : tensor<1x750xf32>
    
      // CHECK-DAG: %0 = mhlo.constant dense<7.680000e+02> : tensor<f32>
      // CHECK: %1 = "mhlo.broadcast_in_dim"(%0) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor<f32>) -> tensor<1x750xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 1.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc

          %0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
          %1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
          %2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
          %3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32>
          %4 = mhlo.multiply %3, %3 : tensor<?xi32>
          return %4 : tensor<?xi32>
        }
      })";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 7.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h

      // Compiles the given Operation with XlaBuilder and imports the generated HLO
      // via the HLO -> MHLO importer.
      absl::StatusOr<mhlo::TupleOp> CompileWithHloImporter(
          tensorflow::OpKernelContext& op_context);
    
      // Import the given XlaComputation into the parent module. Returns the given
      // generated function.
      absl::StatusOr<mhlo::TupleOp> ImportXlaComputation(
          xla::XlaComputation& computation);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:16:07 UTC 2024
    - 5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

      return b.createOrFold<mhlo::DynamicBroadcastInDimOp>(
          broadcast_to_type, epsilon, shape_value, dims);
    }
    
    class UnfuseBatchNormTrainingPattern
        : public OpRewritePattern<mhlo::BatchNormTrainingOp> {
     public:
      using OpRewritePattern<mhlo::BatchNormTrainingOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(mhlo::BatchNormTrainingOp bn_op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tf2xla/tests/verify-tfxla-legalization.mlir

      %0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
      %1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
      %2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
      // expected-error @+1 {{Node `mhlo.dynamic_iota` must have compile-time constant}}
      %3 = "mhlo.dynamic_iota"(%2) <{iota_dimension = 0 : i64}> : (tensor<1xi64>) -> tensor<?xi32>
      %4 = mhlo.multiply %3, %3 : tensor<?xi32>
      return %4 : tensor<?xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 3.1K bytes
    - Viewed (0)
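Notes on selected results

The @sigmoid function in result 2 spells the logistic sigmoid out as a tanh-based
decomposition: scale the input by 0.5, apply tanh, scale by 0.5 again, and add 0.5.
The identity behind it is

    sigmoid(x) = 1 / (1 + exp(-x)) = 0.5 * tanh(x / 2) + 0.5

so the MHLO sequence %3 through %6 computes sigmoid(%arg0) element-wise.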
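Result 3 excerpts the declaration of AddTFToStablehloPasses(OpPassManager&, bool
skip_resize, bool smuggle_disallowed_ops). A minimal sketch of how such a pipeline
builder might be driven follows; the mlir::odml namespace (seen in result 5), the
header path (taken from result 3's file name), and the use of a plain
mlir::PassManager are assumptions, not something the excerpt confirms.

    // Sketch only: assumes the declaration shown in result 3 and the
    // mlir::odml namespace seen in result 5.
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Support/LogicalResult.h"
    #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/transforms.h"

    mlir::LogicalResult RunTfToStablehlo(mlir::ModuleOp module) {
      mlir::PassManager pm(module.getContext());
      // Flags follow the signature in the excerpt; both are disabled here.
      mlir::odml::AddTFToStablehloPasses(pm, /*skip_resize=*/false,
                                         /*smuggle_disallowed_ops=*/false);
      return pm.run(module);  // Run the assembled pipeline on the module.
    }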
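Result 9 comes from a pass that unfuses mhlo batch-normalization ops into primitive
arithmetic; the excerpt shows epsilon being broadcast to the operand shape. The
arithmetic that batch normalization defines, and that an unfused expansion has to
reproduce, is essentially

    y = (x - mean) / sqrt(variance + epsilon) * scale + offset

where, for the mhlo.batch_norm_training case targeted by UnfuseBatchNormTrainingPattern,
mean and variance are computed from the current batch.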