Results 31 - 40 of 457 for vecotr (0.3 sec)

  1. tensorflow/cc/saved_model/loader.cc

    Status RunOnce(const RunOptions& run_options,
                   const std::vector<std::pair<string, Tensor>>& inputs,
                   const std::vector<string>& output_tensor_names,
                   const std::vector<string>& target_node_names,
                   std::vector<Tensor>* outputs, RunMetadata* run_metadata,
                   Session* session) {
      CallableOptions callable_options;
      std::vector<Tensor> feed_tensors;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 04:36:00 UTC 2024
    - 23K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_launch_util.h

    Status PreparePjRtExecutableArguments(
        int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping,
        const std::vector<const Tensor*>& inputs,
        const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
        xla::PjRtClient* pjrt_client, xla::PjRtDevice* pjrt_device,
        bool use_pjrt_tensor_buffer, std::vector<xla::PjRtBuffer*>* args,
        std::vector<std::unique_ptr<xla::PjRtBuffer>>* owned_args,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.cc

                                  const absl::string_view inference_type,
                                  QuantizationSpecs* quant_specs) {
      const std::vector<std::string> input_nodes = absl::StrSplit(node_names, ',');
      std::vector<std::optional<double>> node_mins;
      if (!min_values.empty()) {
        std::vector<std::string> node_mins_str = absl::StrSplit(min_values, ',');
        for (const std::string& node_mins_str : node_mins_str) {
          double value;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h

                               absl::string_view shapes,
                               GraphImportConfig::InputArrays* inputs);
    
    Status ParseInputArrayInfo(
        const std::vector<string>& node_names,
        const std::vector<string>& node_dtypes,
        const std::vector<std::optional<std::vector<int>>>& node_shapes,
        GraphImportConfig::InputArrays* inputs);
    
    // Parses shapes from the given string into shapes_vector which is a structured
    // format.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 04:56:10 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h

    absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>>
    GraphdefToMlirTranslateFunction(
        llvm::StringRef input, const std::vector<std::string>& input_arrays,
        const std::vector<std::string>& input_dtypes,
        const std::vector<std::optional<std::vector<int>>>& input_shapes,
        const std::vector<std::string>& output_arrays,
        const std::vector<std::string>& control_output_arrays,
        const GraphdefToMlirOptions& import_options, mlir::MLIRContext* context);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 5.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/flatbuffer_operator.cc

    }
    
    static mlir::Attribute BuildRankedTensorAttr(std::vector<int64_t> shape,
                                                 std::vector<bool> value,
                                                 mlir::Builder builder) {
      // The implementation of getBoolVectorAttr is flawed, so we bypass it here
      std::vector<llvm::APInt> extendVec;
      extendVec.resize(value.size());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 38K bytes
    - Viewed (0)
  7. tensorflow/c/eager/gradient_checker.cc

      int num_elems = TF_TensorElementCount(theta_tensor);
      vector<float> theta_data(num_elems);
      memcpy(theta_data.data(), TF_TensorData(theta_tensor),
             TF_TensorByteSize(theta_tensor));
    
      // Initialize space for the numerical gradient.
      vector<float> dtheta_approx(num_elems);
    
      // Get theta shape and store in theta_dims.
      int num_dims = TF_NumDims(theta_tensor);
      vector<int64_t> theta_dims(num_dims);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h

        const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
        XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_funcs,
        const std::vector<tensorflow::TensorShape>& arg_shapes,
        std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
        std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
        xla::CompileOnlyClient* client,
        XlaCompiler::CompilationResult* compilation_result);
    
    }  // namespace v1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 2.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/jit/xla_compile_on_demand_op.cc

    // will be cleared before populating them.
    Status GetAndLockVariablesAndBuildXlaCompilerArguments(
        const OpKernelContext& ctx, const std::vector<const Tensor*>& inputs,
        const std::vector<int>& constant_indices,
        const std::vector<int>& variable_indices,
        std::vector<VariableInfo>* variables,
        std::vector<XlaCompiler::Argument>* args) {
      variables->clear();
      args->clear();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 29 08:39:39 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  10. tensorflow/c/c_api.cc

        // Input tensors
        const std::vector<std::pair<string, Tensor>>& input_pairs,
        // Output tensors
        const std::vector<string>& output_tensor_names, TF_Tensor** c_outputs,
        // Target nodes
        const std::vector<string>& target_oper_names, TF_Buffer* run_metadata,
        TF_Status* status) {
      const int noutputs = output_tensor_names.size();
      std::vector<Tensor> outputs(noutputs);
      Status result;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 102.3K bytes
    - Viewed (0)
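Several of the snippets above copy a tensor's payload into a std::vector — e.g., result 7 (tensorflow/c/eager/gradient_checker.cc) reads a TF_Tensor's contents into vector<float> via TF_TensorData and TF_TensorByteSize. A minimal standalone sketch of that pattern is shown below; it assumes only the public C API header tensorflow/c/c_api.h, and the wrapper name TensorToFloatVector is illustrative, not taken from the results above.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    #include "tensorflow/c/c_api.h"

    // Copy a float TF_Tensor's payload into a std::vector<float>, mirroring the
    // pattern in gradient_checker.cc above. Dtype and error checks are omitted
    // for brevity.
    std::vector<float> TensorToFloatVector(TF_Tensor* tensor) {
      const int64_t num_elems = TF_TensorElementCount(tensor);
      std::vector<float> data(num_elems);
      std::memcpy(data.data(), TF_TensorData(tensor), TF_TensorByteSize(tensor));
      return data;
    }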