Results 1 - 10 of 20 for output_types (0.23 sec)
tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization.td
def FuseMapAndBatch : Pat<
  (TF_BatchDatasetV2Op
     (TF_MapDatasetOp $input_dataset, $other_arguments, $f, $output_types,
        $output_shapes, $use_inter_op_parallelism, $preserve_cardinality,
        $force_synchronous, $map_dataset_metadata),
     $batch_size, $drop_remainder, $parallel_copy, $batch_output_types,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/decompose_optionals.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/nms_utils.cc
Value iou_threshold = func_.getArgument(3);
Value score_threshold = func_.getArgument(4);
auto output_type0 = func_.getFunctionType().getResult(0);
auto output_type1 = func_.getFunctionType().getResult(1);
OpBuilder builder(func_.getBody());
auto op = builder.create<mlir::TFL::NonMaxSuppressionV4Op>(
    func_.getLoc(), output_type0, output_type1, boxes, scores,
    max_output_size, iou_threshold, score_threshold);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h
auto output = op->getResult(0);
auto output_type =
    mlir::dyn_cast_or_null<mlir::RankedTensorType>(output.getType());
if (output_type == nullptr || !output_type.hasStaticShape()) return false;
int64_t cols = 1;
for (int i = 0; i < output_type.getRank() - 1; ++i) {
  cols *= output_type.getDimSize(i);
}
const int64_t cost_per_col = 2 * weight_type.getNumElements();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0) -
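The arithmetic above prices the op at 2 * weight-elements multiply-accumulate operations per output column, with the column count taken from every output dimension except the innermost. A standalone sketch of that computation with plain integers instead of MLIR types; combining the two factors into a total is our assumption, since the excerpt stops before the final multiplication:

#include <cstdint>
#include <vector>

// Sketch of the count estimate from arithmetic_count_util.h: cols is the
// product of all output dims except the last, and each column costs
// 2 * (number of weight elements) ops (one multiply plus one add).
int64_t EstimateFullyConnectedCount(const std::vector<int64_t>& output_dims,
                                    int64_t weight_num_elements) {
  int64_t cols = 1;
  for (size_t i = 0; i + 1 < output_dims.size(); ++i) {
    cols *= output_dims[i];
  }
  const int64_t cost_per_col = 2 * weight_num_elements;
  return cols * cost_per_col;  // assumed total; not shown in the excerpt
}

// Example: output shape [8, 16, 32] and a 32x64 weight (2048 elements)
// gives cols = 8 * 16 = 128 and a count of 128 * 2 * 2048 = 524288.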
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ModifyIONodesPass)
  explicit ModifyIONodesPass() {}
  explicit ModifyIONodesPass(mlir::Type input_type, mlir::Type output_type) {
    this->input_type = input_type;
    this->output_type = output_type;
  }
  void runOnOperation() override;

 private:
  // Assign the io types from the command line flag. This is only required for
  // tests.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
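The pass carries its input and output types as constructor state. A hedged construction sketch using the two-argument constructor shown above; the concrete type choices are illustrative, the ModifyIONodesPass class is assumed in scope, and scheduling the pass on a pass manager is outside the excerpt:

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"

// Illustrative only: configures the pass to rewrite IO nodes between
// float32 and 8-bit integer tensors.
void ConfigureIONodesPass(mlir::MLIRContext& context) {
  mlir::Builder builder(&context);
  ModifyIONodesPass pass(builder.getF32Type(), builder.getIntegerType(8));
  (void)pass;  // running it (e.g. via a PassManager) is not shown here
}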
tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
auto input_type =
    mlir::dyn_cast<RankedTensorType>(dequantize_op.getOutput().getType());
auto output_type = mlir::dyn_cast<RankedTensorType>(
    passthrough_op->getResult(0).getType());
if (!input_type || !output_type ||
    get_num_elements(input_type) <= get_num_elements(output_type)) {
  return failure();
}
Type input_element_type = getElementTypeOrSelf(dequantize_op.getInput());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.1K bytes - Viewed (0) -
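The guard above lets the rewrite proceed only when the passthrough op shrinks the tensor, so the dequantize ends up running over fewer elements. The same condition with plain shapes instead of MLIR types; the Shape struct and helper names are illustrative stand-ins:

#include <cstdint>
#include <vector>

// Stand-in for a ranked tensor shape.
struct Shape {
  std::vector<int64_t> dims;
};

int64_t NumElements(const Shape& s) {
  int64_t n = 1;
  for (int64_t d : s.dims) n *= d;
  return n;
}

// Mirrors the check in optimize_op_order.cc: pushing the dequantize
// below the passthrough op only pays off when the output is strictly
// smaller than the input.
bool WorthPushingDequantizeDown(const Shape& input, const Shape& output) {
  return NumElements(input) > NumElements(output);
}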
platforms/core-runtime/stdlib-java-extensions/src/main/java/org/gradle/internal/Cast.java
 *
 * @param outputType The type to cast the input to
 * @param object The object to be cast (must not be {@code null})
 * @param <O> The type to be cast to
 * @param <I> The type of the object to be cast
 * @return The input object, cast to the output type
 */
public static <O, I> O cast(Class<O> outputType, I object) {
    try {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Jun 10 14:28:48 UTC 2024 - 3K bytes - Viewed (0) -
build/pause/Makefile
	docker buildx build --provenance=false --sbom=false --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/$(ARCH) \
		-t $(IMAGE):$(TAG)-${OS}-$(ARCH) --build-arg BASE=${BASE} --build-arg ARCH=$(ARCH) .
	touch $@

.container-windows-$(ARCH): $(foreach binary, ${BIN}, bin/${binary}-${OS}-${ARCH})
	docker buildx build --provenance=false --sbom=false --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/$(ARCH) \
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 23 19:31:40 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc
auto *value_op = assign_variable_op.getValue().getDefiningOp();
auto dq_op = dyn_cast_or_null<DequantizeOp>(value_op);
if (dq_op) {
  Type output_type = dq_op.getInput().getType();
  auto qtype = quant::QuantizedType::getQuantizedElementType(output_type);
  if (qtype == quant::QuantizedType::getQuantizedElementType(ref_qtype)) {
    // Same quantization parameters, remove it.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
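The comparison in the snippet reduces to extracting the quantized element type from both sides and checking equality; when they match, the Dequantize feeding the variable adds nothing. Factored out as a helper; the function name is ours and the include path varies across MLIR versions:

#include "mlir/Dialect/Quant/QuantTypes.h"  // path differs in newer MLIR trees

// Hypothetical helper for the check in quantize_variables.cc: two types
// agree when they carry the same quantized element type.
static bool HasSameQuantizedElementType(mlir::Type lhs, mlir::Type rhs) {
  auto lhs_qtype = mlir::quant::QuantizedType::getQuantizedElementType(lhs);
  return lhs_qtype &&
         lhs_qtype == mlir::quant::QuantizedType::getQuantizedElementType(rhs);
}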
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a
// valid FlatBuffer format for Model supported by TFLite.
//
// The `input_type`, `output_type` and `inference_type` can be float32 / qint8 /
// int8 / int16.
//
// Returns a partially quantized model if `fully_quantize` is false. Returns a
// non-OK status if the quantization fails.
//
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0)
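The excerpt shows the doc comment but not the prototype, so the declaration below is reconstructed purely from the names the comment uses (model_buffer, output_buffer, input_type, output_type, inference_type, fully_quantize). Treat it as a sketch of the contract and consult quantize_model.h for the real signature:

#include <string>

#include "absl/status/status.h"
#include "absl/strings/string_view.h"

namespace sketch {

// Stand-in for the four element types the comment permits:
// float32 / qint8 / int8 / int16.
enum class TensorType { kFloat32, kQInt8, kInt8, kInt16 };

// Assumed contract: reads a TFLite Model FlatBuffer from `model_buffer`,
// writes the quantized Model FlatBuffer to `output_buffer`, returns a
// partially quantized model when `fully_quantize` is false, and a non-OK
// status when quantization fails.
absl::Status QuantizeModel(absl::string_view model_buffer,
                           TensorType input_type, TensorType output_type,
                           TensorType inference_type, bool fully_quantize,
                           std::string* output_buffer);

}  // namespace sketch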