- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 45 for vector (0.14 sec)
-
src/crypto/internal/hpke/testdata/rfc9180-vectors.json
Roland Shoemaker <******@****.***> 1715710616 -0700
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:33:33 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_operator.h
return std::vector<double>(vec.begin(), vec.end()); } return std::vector<double>(); } // Handles the case when the DenseElementsAttr doesn't exist, and when it // doesn't returns a vector of length `default_size` all with the same value // `default_value`. template <typename T> static inline std::vector<T> GetOptionalVector(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 21:00:09 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc
class CalibrationStatisticsSaverTest : public OpsTestBase {}; TEST_F(CalibrationStatisticsSaverTest, MissingOutputPath) { std::vector<std::string> ids{"1"}; std::vector<int32_t> calibration_methods{ CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX}; std::vector<NodeDefBuilder::NodeOut> inputs; inputs.emplace_back("min", 0, DT_FLOAT); inputs.emplace_back("max", 0, DT_FLOAT);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
} static mlir::Attribute BuildRankedTensorAttr(std::vector<int64_t> shape, std::vector<bool> value, mlir::Builder builder) { // The implementation of getBoolVectorAttr is flawed, so we bypass it here std::vector<llvm::APInt> extendVec; extendVec.resize(value.size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
Status PreparePjRtExecutableArguments( int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping, const std::vector<const Tensor*>& inputs, const absl::flat_hash_map<int, const Tensor*>& variable_snapshots, xla::PjRtClient* pjrt_client, xla::PjRtDevice* pjrt_device, bool use_pjrt_tensor_buffer, std::vector<xla::PjRtBuffer*>* args, std::vector<std::unique_ptr<xla::PjRtBuffer>>* owned_args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
std::vector<int> ConstantsVector(OpKernelConstruction* ctx) { DataTypeVector constant_types; OP_REQUIRES_OK_RETURN(ctx, std::vector<int>(), ctx->GetAttr("Tconstants", &constant_types)); std::vector<int> constants(constant_types.size()); std::iota(constants.begin(), constants.end(), 0); return constants; } std::vector<int> ResourcesVector(OpKernelConstruction* ctx) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_funcs, const std::vector<tensorflow::TensorShape>& arg_shapes, std::vector<tpu::ShardingAndIndex>* arg_core_mapping, std::vector<std::vector<xla::Shape>>* per_core_arg_shapes, xla::CompileOnlyClient* client, XlaCompiler::CompilationResult* compilation_result) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc
auto client = xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform).value(); std::vector<TensorShape> arg_shapes; TPUCompileMetadataProto metadata_proto; bool use_tuple_args = true; std::vector<ShardingAndIndex> arg_core_mapping; std::vector<std::vector<xla::Shape>> per_core_arg_shapes; std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes; // This doesn't actually compile correctly.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 23:59:33 UTC 2024 - 16.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc
mlir::MLIRContext context; mlir::quant::QuantizationSpecs quant_specs; // Parse input arrays. std::vector<string> node_names; std::vector<string> node_dtypes; std::vector<std::optional<std::vector<int>>> node_shapes; std::vector<std::optional<double>> node_mins; std::vector<std::optional<double>> node_maxs; // Populate quantization specs. TF_RETURN_IF_ERROR(internal::PopulateQuantizationSpecs(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc
const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags, mlir::quant::QuantizationSpecs* quant_specs, std::vector<std::string>* node_names, std::vector<std::string>* node_dtypes, std::vector<std::optional<std::vector<int>>>* node_shapes, std::vector<std::optional<double>>* node_mins, std::vector<std::optional<double>>* node_maxs) { quant_specs->inference_input_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 17.3K bytes - Viewed (0)