Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 10 for unconditionally_use_set_output_shapes (0.5 sec)

  1. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.cc

      specs.upgrade_legacy = import_options.upgrade_legacy;
      specs.enable_shape_inference = import_options.enable_shape_inference;
      specs.unconditionally_use_set_output_shapes =
          import_options.unconditionally_use_set_output_shapes;
      specs.xla_compile_device_type = import_options.xla_compile_device_type;
      specs.enable_soft_placement = import_options.enable_soft_placement;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 11:51:44 UTC 2024
    - 14.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc

          debug_info_file,        xla_compile_device_type,
          prune_unused_nodes,     convert_legacy_fed_inputs,
          graph_as_function,      upgrade_legacy,
          enable_shape_inference, unconditionally_use_set_output_shapes,
          enable_soft_placement,  set_original_tf_func_name};
    
      auto module_or = tensorflow::GraphdefToMlirTranslateFunction(
          input, input_arrays, input_dtypes, input_shapes, output_arrays,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 7.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h

      std::string xla_compile_device_type;
      bool prune_unused_nodes;
      bool convert_legacy_fed_inputs;
      bool graph_as_function;
      bool upgrade_legacy;
      bool enable_shape_inference;
      bool unconditionally_use_set_output_shapes;
      bool enable_soft_placement;
      bool set_original_tf_func_name = false;
    };
    
    // TODO(antiagainst): Directly manipulating files in library functions is not
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 5.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc

      specs.prune_unused_nodes = true;
      specs.convert_legacy_fed_inputs = true;
      specs.graph_as_function = false;
      specs.upgrade_legacy = true;
      specs.unconditionally_use_set_output_shapes = true;
      internal::WarningUnusedFlags(model_flags, toco_flags);
    
      // Register all custom ops, including user-specified custom ops.
      TF_RETURN_IF_ERROR(internal::RegisterAllCustomOps(toco_flags));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 5.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

            input_filename, tags, exported_names, context,
            /*unconditionally_use_set_output_shapes=*/true);
        if (!module_or.status().ok()) return module_or.status();
        return std::move(module_or).value();
      } else if (saved_model_version == 1) {
        MLIRImportOptions options;
        options.upgrade_legacy = specs.upgrade_legacy;
        options.unconditionally_use_set_output_shapes = true;
        options.lift_variables = enable_variable_lifting;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/translate/import_model.h

    absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlirLite(
        SavedModelMLIRImportInput& input,
        std::optional<absl::Span<const std::string>> exported_names,
        mlir::MLIRContext* context,
        bool unconditionally_use_set_output_shapes = false);
    
    // Serialize a MLIR module to a string.
    std::string MlirModuleToString(mlir::ModuleOp module,
                                   mlir::OpPrintingFlags flags);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 6.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.cc

      ss << "\nrestrict_functionalization_to_compiled_nodes: "
         << restrict_functionalization_to_compiled_nodes;
      ss << "\nenable_shape_inference: " << enable_shape_inference;
      ss << "\nunconditionally_use_set_output_shapes: "
         << unconditionally_use_set_output_shapes;
      ss << "\nxla_compile_device_type: " << xla_compile_device_type;
    
      return ss.str();
    }
    
    Status ParseOutputArrayInfo(absl::string_view array_names,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 10.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h

      // not always set correctly (which is undesirable and should be addressed)
      // so make it opt-in to consider it unconditionally also when importing the
      // graph.
      bool unconditionally_use_set_output_shapes = false;
      // If set, use the value as the device type and mark the function graph for
      // XLA compilation.
      string xla_compile_device_type;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 04:56:10 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

        // added correctly.
        GraphImportConfig specs;
        specs.enable_shape_inference = specs_.enable_shape_inference;
        specs.unconditionally_use_set_output_shapes =
            specs_.unconditionally_use_set_output_shapes;
        for (const auto& name_and_value : func_def->attr()) {
          if (name_and_value.first == "_input_shapes") {
            auto& list = name_and_value.second.list();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

      // Some graphs may require _output_shapes (an unregistered attribute)
      // to override shapes. It is unfortunately not always set correctly so only
      // do it optionally.
      config.unconditionally_use_set_output_shapes =
          unconditionally_use_set_output_shapes;
      return ConvertGraphToMlir(graph, debug_info, flib_def, config, context);
    }
    
    Status BuildHloFromGraph(
        const Graph& graph, xla::XlaBuilder& builder,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
    - Viewed (0)
Back to top