Results 11 - 20 of 209 for legalized (0.15 sec)

  1. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h

    //   . Legalizes control flow operations.
    //   . Decomposes compound resource operations so that the only remaining
    //     operations on resource variables are resource reads/writes.
    //   . Replaces resource reads/writes with function inputs/outputs and
    //     eliminates the use of resource variables.
    //   . Legalizes the operations to XLA HLO operations.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 10.4K bytes
    - Viewed (0)
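
    The bullet list above describes a sequence of lowering steps. As a rough
    sketch only, such a sequence could be assembled with MLIR's OpPassManager;
    every Create*Pass name below is a hypothetical placeholder for the bridge's
    real passes, and only mlir::createCanonicalizerPass() is a stock MLIR pass.

    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"

    void BuildLegalizationPipeline(mlir::OpPassManager& pm) {
      // 1. Legalize control flow operations.
      // pm.addPass(CreateLegalizeControlFlowPass());      // placeholder name
      // 2. Decompose compound resource operations into plain reads/writes.
      // pm.addPass(CreateDecomposeResourceOpsPass());     // placeholder name
      // 3. Replace resource reads/writes with function inputs/outputs.
      // pm.addPass(CreatePromoteResourcesToArgsPass());   // placeholder name
      // 4. Legalize the remaining operations to XLA HLO.
      // pm.addPass(CreateLegalizeTFToHloPass());          // placeholder name
      // Standard cleanup between lowering stages.
      pm.addPass(mlir::createCanonicalizerPass());
    }
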
  2. tensorflow/compiler/mlir/lite/transforms/legalize_variables.cc

    namespace mlir {
    namespace TFL {
    namespace {
    #define GEN_PASS_DEF_LEGALIZEVARIABLESPASS
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
    
    // Attribute name to identify whether variables should be legalized to TFLite or
    // not.
    const char kLegalizeTflVariables[] = "tfl._legalize_tfl_variables";
    
    bool HasSupportedElementType(Operation* op) {
      return utils::IsSupportedVariableType(op);
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.3K bytes
    - Viewed (0)
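
    A minimal sketch (not the pass's actual implementation) of how a module-level
    flag such as kLegalizeTflVariables can gate variable legalization; the helper
    name ShouldLegalizeVariables and the "missing attribute means legalize"
    default are assumptions made here for illustration.

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/BuiltinOps.h"

    // Hypothetical helper: read the module-level flag the pass keys on.
    bool ShouldLegalizeVariables(mlir::ModuleOp module) {
      auto attr =
          module->getAttrOfType<mlir::BoolAttr>("tfl._legalize_tfl_variables");
      // Assumption for this sketch: no attribute means variables get legalized.
      return !attr || attr.getValue();
    }
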
  3. tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc

      target.addLegalDialect<func::FuncDialect>();
      target.addLegalDialect<tensor::TensorDialect>();
      target.addLegalDialect<shape::ShapeDialect>();
      target.addLegalOp<func::CallOp>();
    
    // These ops are legalized by LegalizeTFCommunication after this pass, and
    // that pass only operates on MHLO control flow ops.
      target.addLegalOp<TF::_XlaHostComputeMlirOp, TF::XlaSendToHostOp,
                        TF::XlaRecvFromHostOp>();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 17:44:14 UTC 2024
    - 2.3K bytes
    - Viewed (0)
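
    The snippet above configures a ConversionTarget. As a sketch of how such a
    target is typically consumed (assumed wiring, not code from this file), the
    target and a pattern set are handed to MLIR's partial-conversion driver:

    #include "mlir/Transforms/DialectConversion.h"

    // Ops not rewritten by any pattern must already be legal per `target`,
    // otherwise applyPartialConversion reports failure.
    mlir::LogicalResult RunLegalization(mlir::Operation* op,
                                        mlir::ConversionTarget& target,
                                        mlir::RewritePatternSet&& patterns) {
      return mlir::applyPartialConversion(op, target, std::move(patterns));
    }
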
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.cc

          index_lt.getLhs() != body.getArgument(1) ||
          index_lt.getRhs() != body.getArgument(3))
        return failure();
    
      return success();
    }
    
    // Returns true if the given reduce op can be legalized to ArgMax/ArgMin ops.
    std::optional<bool> IsReduceOpLegal(mhlo::ReduceOp reduce_op) {
      if (succeeded(MatchReduceToArgMinMaxType1(reduce_op, true, true)) ||
          succeeded(MatchReduceToArgMinMaxType1(reduce_op, false, true)) ||
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 20:53:17 UTC 2024
    - 8K bytes
    - Viewed (0)
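
    A plausible way (assumed here, not shown in this file) to plug IsReduceOpLegal
    into a ConversionTarget is as a dynamic legality callback; returning
    std::nullopt defers the decision to the target's other legality rules. The
    snippet relies on the surrounding file's mhlo and odml includes.

    void ConfigureReduceLegality(mlir::ConversionTarget& target) {
      // std::nullopt means "no opinion"; a concrete true/false is taken as-is.
      target.addDynamicallyLegalOp<mlir::mhlo::ReduceOp>(
          [](mlir::mhlo::ReduceOp op) { return mlir::odml::IsReduceOpLegal(op); });
    }
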
  5. tensorflow/compiler/mlir/lite/transforms/passes.h

    class OperationPass;
    class Type;
    
    namespace TFL {
    
    // Creates an instance of the TensorFlow Lite dialect LegalizeTF pass.
    // When `run_tfl_runtime_verification` is true, each TFL builtin op is checked
    // against the TFL runtime's capabilities, and incompatible TF ops are left in
    // the graph without being legalized. If `preserve_assert_op` is true, the
    // TF::AssertOp will not be removed.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 07 21:29:34 UTC 2024
    - 10.9K bytes
    - Viewed (0)
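
    Assuming a factory with roughly the signature this comment implies (the exact
    declaration in passes.h is not shown here), the pass might be added to a
    pipeline as follows:

    #include "mlir/Pass/PassManager.h"

    void AddLegalizeTF(mlir::OpPassManager& pm) {
      const bool run_tfl_runtime_verification = true;  // verify TFL runtime capability
      const bool preserve_assert_op = false;           // allow TF::AssertOp removal
      // Signature assumed from the comment above, not verified.
      pm.addPass(mlir::TFL::CreateLegalizeTFPass(run_tfl_runtime_verification,
                                                 preserve_assert_op));
    }
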
  6. tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc

      EXPECT_FALSE(IsDynamicPadderOp(TypeID::get<TF::ConstOp>()));
    }
    
    // This test is kind of odd. We go through all the TensorFlow types and check
    // whether they are legalized with MLIR, TF2XLA, or both. Ideally the sets are
    // disjoint, but until that happens, this test ensures that the set doesn't
    // grow.
    TEST_F(LegalizationOpConfigTest, CountLoweringsSet) {
      int mlir_lowering_count = 0;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 30 03:31:01 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h

        }
        return success();
      }
    
      virtual bool IsValueInitValue(const DenseElementsAttr& attr) const = 0;
    };
    
    // Returns true if the given reduce op can be legalized to ArgMax/ArgMin ops.
    std::optional<bool> IsReduceOpLegal(mhlo::ReduceOp reduce_op);
    
    }  // namespace odml
    }  // namespace mlir
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc

        if (inserted_value) continue;
        // We can only clone a constant op or a const->dequantize combo. The latter
        // case is useful for float16 quantization. Since all ops have been
        // legalized to tflite ops, we only care about ConstOp, QConstOp, or the
        // mlir constant op.
        Operation* input_op = operand.getDefiningOp();
        if (input_op == nullptr) return failure();
    
        Attribute attr;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 5.9K bytes
    - Viewed (0)
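
    A plausible continuation of the check above (a sketch, not necessarily this
    pass's exact code): bind the defining op's constant value with MLIR's matcher
    so the constant can be re-materialized for each use.

    #include "mlir/IR/Matchers.h"

    // m_Constant matches any op that folds to a constant and binds its attribute.
    bool BindConstantValue(mlir::Operation* input_op, mlir::Attribute& attr) {
      return mlir::matchPattern(input_op, mlir::m_Constant(&attr));
    }
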
  9. tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc

          TypeID::get<TF::ModOp>(),
    
          // MatrixDiagPartV3 should use the MLIR implementation due to performance.
          TypeID::get<TF::MatrixDiagPartV3Op>(),
    
          // Ops that are legalized in the old bridge using MlirXlaOpKernel
          TypeID::get<TF::AbsOp>(),
          TypeID::get<TF::AtanOp>(),
          TypeID::get<TF::AvgPool3DOp>(),
          TypeID::get<TF::BiasAddGradOp>(),
          TypeID::get<TF::CeilOp>(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 21.7K bytes
    - Viewed (0)
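
    The file lists op TypeIDs like the ones above. As a small illustration (the
    helper and set names here are invented), such a list can be kept in an
    llvm::DenseSet and queried by TypeID:

    #include "llvm/ADT/DenseSet.h"
    #include "mlir/Support/TypeID.h"

    // Hypothetical lookup over a set of op TypeIDs like the one listed above.
    bool IsOpInSet(const llvm::DenseSet<mlir::TypeID>& op_set,
                   mlir::TypeID type_id) {
      return op_set.contains(type_id);
    }
    // Example query: IsOpInSet(ops, mlir::TypeID::get<mlir::TF::AbsOp>());
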
  10. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        Value output_lhs =
            bmm_op.getAdjX() ? create_z_x_transpose_op(input_lhs) : input_lhs;
    
        // The rhs needs to be transposed if adj_y == false AND this matmul will be
        // legalized to tfl.fully_connected.
        Value output_rhs =
            !bmm_op.getAdjY() ? create_z_x_transpose_op(input_rhs) : input_rhs;
    
        Type output_type = bmm_op.getResult().getType();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)