Results 1 - 6 of 6 for CreateVector (0.24 sec)
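
All six hits call flatbuffers::FlatBufferBuilder::CreateVector to serialize a C++ container into a FlatBuffer vector, getting back an Offset that is later stored in a table field. Below is a minimal, compilable sketch of the two overloads that appear in these results (the container overload seen in hits 1, 2, and 6, and the pointer-plus-length overload seen in hit 2); the variable names and values are illustrative only, not taken from the TensorFlow schema.

    #include <cstdint>
    #include <vector>

    #include "flatbuffers/flatbuffers.h"

    int main() {
      flatbuffers::FlatBufferBuilder builder;

      // Overload 1: serialize a std::vector directly (as in hits 1, 2, 6).
      std::vector<int32_t> dims = {0, 2, 3};
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims_offset =
          builder.CreateVector(dims);

      // Overload 2: serialize from a pointer and an element count (as in hit 2).
      const float costs[] = {-1.0f, 2.0f};
      flatbuffers::Offset<flatbuffers::Vector<float>> costs_offset =
          builder.CreateVector(costs, 2);

      // The offsets are handles into the builder's buffer; in the real code
      // they are passed to generated Create*() helpers (e.g. CreateOpMetadata)
      // before builder.Finish() is called.
      (void)dims_offset;
      (void)costs_offset;
      return 0;
    }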

  1. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

          gather_op.getDimensionNumbers().getStartIndexMap().end());
    
      auto offset_dims = builder_.CreateVector(offset_dims_vec);
      auto collapsed_slice_dims = builder_.CreateVector(collapsed_slice_dims_vec);
      auto start_index_map = builder_.CreateVector(start_index_map_vec);
      auto slice_sizes = builder_.CreateVector(
          mlir::GetOptionalVector<int64_t>(gather_op.getSliceSizes()));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc

          CreateOpMetadata(
              fb_builder, 3, 1,
              fb_builder.CreateVector(std::vector<float>({-1.0, 2.0}))),
      };
      const auto subgraphs = {CreateSubgraphMetadata(
          fb_builder, fb_builder.CreateVector(ops.begin(), ops.size()))};
    
      const auto metadata = CreateRuntimeMetadata(
          fb_builder, hardwares,
          fb_builder.CreateVector(subgraphs.begin(), subgraphs.size()));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 06:11:34 UTC 2024
    - 6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc

          flatbuffers::Offset<flatbuffers::Vector<float>> per_device_cost_offset;
    
          if (per_device_cost.has_value()) {
            per_device_cost_offset = builder->CreateVector(*per_device_cost);
          }
    
          OpMetadataBuilder op_builder(*builder);
          op_builder.add_index(index);
          uint8_t hardware = hardware_map.at(*device_name);
          op_builder.add_hardware(hardware);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 06:11:34 UTC 2024
    - 7.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/schema/schema_generated.h

      auto window_strides__ = window_strides ? _fbb.CreateVector<int64_t>(*window_strides) : 0;
      auto base_dilations__ = base_dilations ? _fbb.CreateVector<int64_t>(*base_dilations) : 0;
      auto window_dilations__ = window_dilations ? _fbb.CreateVector<int64_t>(*window_dilations) : 0;
      auto padding__ = padding ? _fbb.CreateVector<int64_t>(*padding) : 0;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 1M bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/converter_gen.cc

           << "flatbuffers::FlatBufferBuilder *fbb) {\n";
    
        // Inputs & outputs
        os << "  auto inputs = fbb->CreateVector(operands);\n"
              "  auto outputs = fbb->CreateVector(results);\n\n";
        // Intermediates for LSTM.
        if (has_intermediates) {
          os << "  auto intermediates = fbb->CreateVector(intermediate_index);\n";
        }
    
        // Build the FlatBuffer operator
        os << "  return tflite::CreateOperator(\n"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 19 15:05:28 UTC 2023
    - 23.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/flatbuffer_operator.cc

      std::vector<int32_t> intVec;
      intVec.reserve(attrArray.getValue().size());
      for (auto attr : attrArray.getValue()) {
        intVec.push_back(mlir::cast<mlir::IntegerAttr>(attr).getInt());
      }
      return builder->CreateVector(intVec);
    }
    
    static flatbuffers::Offset<flatbuffers::Vector<float>>
    ConvertF32ArrayAttrForOptionWriter(mlir::ArrayAttr attrArray,
                                       flatbuffers::FlatBufferBuilder* builder) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 38K bytes
    - Viewed (0)
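
Hits 3 and 4 show the conditional form of the same call: CreateVector runs only when the source data exists, and a default-constructed (zero) Offset is used otherwise, which leaves the vector field absent in the finished table. A small sketch of that pattern using std::optional follows; the names here are illustrative, not taken from the files above.

    #include <cstdint>
    #include <optional>
    #include <vector>

    #include "flatbuffers/flatbuffers.h"

    int main() {
      flatbuffers::FlatBufferBuilder builder;
      std::optional<std::vector<float>> per_device_cost;  // may be empty

      // A zero Offset (the default) means "no vector"; generated table
      // builders skip the field when given a null offset.
      flatbuffers::Offset<flatbuffers::Vector<float>> cost_offset;
      if (per_device_cost.has_value()) {
        cost_offset = builder.CreateVector(*per_device_cost);
      }

      // Equivalent one-liner, as in schema_generated.h (hit 4):
      // auto cost_offset =
      //     per_device_cost ? builder.CreateVector(*per_device_cost) : 0;
      (void)cost_offset;
      return 0;
    }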