Results 1 - 10 of 20 for Platen (0.2 sec)

  1. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

      "TFL::IsReducedTailOfShape($0.getType(), $1.getType())">>;
    
    def IsRankLessThanEqualTo : Constraint<CPred<
      "$0.getType().cast<ShapedType>().getRank() <= "
      "$1.getType().cast<ShapedType>().getRank()">>;
    
    def Flatten : NativeCodeCall<
      "$0.cast<DenseElementsAttr>()"
        ".reshape(RankedTensorType::get({$0.getType().cast<ShapedType>().getNumElements()}, "
                                       "$0.getType().cast<ShapedType>().getElementType()))">;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
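The Flatten helper in this pattern file is a NativeCodeCall that reshapes a constant DenseElementsAttr into a rank-1 tensor with the same element count and element type. A rough NumPy sketch of the equivalent value-level transformation (the constant and its shape are invented for illustration):

    import numpy as np

    # Hypothetical constant with shape (2, 1, 3).
    const = np.array([[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]]], dtype=np.float32)

    # Analogue of the Flatten NativeCodeCall: reshape to rank 1 while keeping
    # the element count and the element type unchanged.
    flat = const.reshape(const.size)

    assert flat.shape == (6,)
    assert flat.dtype == const.dtype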
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc

          rhs,
          DenseIntElementsAttr::get(
              RankedTensorType::get({rhs_rank}, rewriter.getI64Type()),
              rhs_permutation));
      // Reshapes lhs to flatten out_dimensions and contracting_dimensions.
      llvm::SmallVector<int64_t, 4> lhs_flattened_shape = Concat<int64_t>(
          lhs_dot_dimensions_info.batch_dimensions().SizesArray(),
          llvm::ArrayRef<int64_t>{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.2K bytes
    - Viewed (0)
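The comment in this snippet describes how dot_general is lowered: batch dimensions are kept, while the output dimensions and the contracting dimensions of each operand are flattened into single axes so the contraction becomes a plain batched matmul. A rough NumPy sketch of that flattening idea (all shapes and dimension labels here are made up for illustration):

    import numpy as np

    # lhs: (batch, out1, out2, k1, k2), rhs: (batch, k1, k2, out3)
    lhs = np.random.rand(2, 3, 4, 5, 6)
    rhs = np.random.rand(2, 5, 6, 7)

    # Collapse the output dims and the contracting dims of lhs into one axis
    # each, and the contracting dims of rhs likewise.
    lhs_flat = lhs.reshape(2, 3 * 4, 5 * 6)   # (batch, flat_out, flat_contract)
    rhs_flat = rhs.reshape(2, 5 * 6, 7)       # (batch, flat_contract, out3)

    # The general contraction is now a batched matmul, un-flattened afterwards.
    result = np.matmul(lhs_flat, rhs_flat).reshape(2, 3, 4, 7)

    # Matches contracting k1 and k2 directly.
    expected = np.einsum('bxykl,bklz->bxyz', lhs, rhs)
    assert np.allclose(result, expected)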
  3. tensorflow/cc/experimental/libtf/function.cc

    // TODO(b/190203981): Move to a separate nest-like library.
    void Flatten(const TaggedValue& value,
                 std::vector<AbstractTensorHandle*>* flat_args) {
      if (value.type() == TaggedValue::Type::TENSOR) {
        flat_args->emplace_back(value.tensor().get());
      } else if (value.type() == TaggedValue::Type::TUPLE) {
        for (const auto& t : value.tuple()) {
          Flatten(t, flat_args);
        }
      } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 04 19:49:06 UTC 2024
    - 9.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

        llvm::StringRef equation, RankedTensorType lhs_ty) {
      llvm::StringRef lhs;
      llvm::StringRef out;
      std::tie(lhs, out) = equation.split("->");
      if (lhs.empty() || out.empty()) return std::nullopt;
    
      // Try to flatten the "..." if possible.
      int lhs_named_label, rhs_named_label;
    
      // following rhs and rhs_ty variables are non-functional here only created to
      // comply with the existing API
      llvm::StringRef rhs;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/optimize.cc

             (std::equal(i1, reduced_e1, i2));
    }
    
    // Check if the value of the last dimension of type1 is equal to the number of
    // elements in type2. This is a required condition to flatten type2 to form a
    // 1D array and allow the binaryOp handle the broadcasting implicitly.
    bool IsLastDimEqualToNumElements(Type type1, Type type2) {
      return (mlir::cast<ShapedType>(type1).getRank() >= 1 &&
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
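The comment above states the precondition for this rewrite: if the last dimension of type1 equals the total number of elements of type2, then type2 can be flattened to a 1-D tensor and the binary op still broadcasts to the same result. A small NumPy illustration of why the condition is sufficient (the concrete shapes are arbitrary):

    import numpy as np

    a = np.random.rand(2, 3, 256)   # "type1": last dimension is 256
    b = np.random.rand(1, 1, 256)   # "type2": 256 elements in total

    # Because b.size == a.shape[-1], b can be flattened to a 1-D array and the
    # elementwise op broadcasts it along the last axis exactly as before.
    assert np.allclose(a + b, a + b.reshape(b.size))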
  6. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

    // linearized function argument or return on a FunctionDef, and hence to an
    // mlir::func::FuncOp argument / return.
    //
    // This must match the linearization that happens in `tf.nest.flatten`.
    // In particular, dict values should be linearized in sorted key order.
    //
    // The linearized index paths can be returned back to a structured
    // representation (e.g. to emit C structs matching a signature) with a simple
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
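This comment ties FunctionDef argument linearization to tf.nest.flatten, which flattens nested structures depth-first and visits dict values in sorted-key order. A quick check of that ordering from the Python API:

    import tensorflow as tf

    structure = {'b': tf.constant(2), 'a': tf.constant(1),
                 'c': (tf.constant(3), [tf.constant(4)])}

    # Dict values are linearized in sorted key order: 'a', 'b', then the
    # contents of 'c' in structure order.
    flat = tf.nest.flatten(structure)
    print([int(t) for t in flat])   # [1, 2, 3, 4]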
  7. RELEASE.md

    @chanis, Chenyang Liu, Corey Wharton, Daeyun Shin, Daniel Julius Lasiman, Daniel
    Waterworth, Danijar Hafner, Darren Garvey, Denis Gorbachev, @DjangoPeng,
    Egor-Krivov, Elia Palme, Eric Platon, Fabrizio Milo, Gaetan Semet, Georg
    Nebehay, Gu Wang, Gustav Larsson, @haosdent, Harold Cooper, Hw-Zz, @ichuang,
    Igor Babuschkin, Igor Macedo Quintanilha, Ilya Edrenkin, @ironhead, Jakub
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

      // Add inline pass.
      pass_manager.addPass(mlir::createInlinerPass());
    
      // Expands mhlo.tuple ops.
      pass_manager.addPass(
          mlir::mhlo::createExpandHloTuplesPass(entry_function_name.str()));
      // Flatten tuples for control flows.
      pass_manager.addNestedPass<mlir::func::FuncOp>(
          mlir::mhlo::createFlattenTuplePass());
    
      mlir::odml::AddMhloOptimizationPasses(
          pass_manager,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

          rhs,
          DenseIntElementsAttr::get(
              RankedTensorType::get({rhs_rank}, rewriter.getI64Type()),
              rhs_permutation));
    
      // Reshapes lhs to flatten out_dimensions and contracting_dimensions.
      llvm::SmallVector<int64_t, 4> lhs_flattened_shape = Concat<int64_t>(
          lhs_dot_dimensions_info.batch_dimensions().SizesArray(),
          llvm::ArrayRef<int64_t>{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc

    }
    
    // Determine if op commutes with transposes. Requires a strict
    // definition of Elementwise, all i/o shapes and types must be same-rank
    // broadcastable and fully static. Consider moving this into attribute later.
    bool IsElementwise(Operation *op) {
      if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp,
                      TFL::MaximumOp, TFL::MinimumOp>(op))) {
        return false;
      }
    
      auto opr1_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.5K bytes
    - Viewed (0)
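The comment above spells out the property this pass relies on: for strictly elementwise ops whose operands are same-rank broadcastable with fully static shapes, transposing the result is the same as transposing each operand first, so a transpose can be pushed through the op. A NumPy sketch of that commutation (the permutation and shapes are arbitrary):

    import numpy as np

    a = np.random.rand(2, 3, 4)
    b = np.random.rand(2, 3, 4)
    perm = (2, 0, 1)

    # Elementwise add commutes with transpose: pushing the transpose through
    # the op onto its operands does not change the result.
    assert np.allclose(np.transpose(a + b, perm),
                       np.transpose(a, perm) + np.transpose(b, perm))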