- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 123 for input_dtype (0.24 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
LogicalResult CheckFusableLayerNormalizedLstmCellSimple( func::FuncOp lstm_func) { for (int i = 0; i < 5; ++i) { auto input = lstm_func.getArgument(i); auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType()); if (!input_type) { lstm_func.emitWarning( "we cannot fuse this lstm func because all the inputs have not " "ranked tensor type."); return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc
for (int index : dynamic_shape_arg_index) { BlockArgument arg = func.getArgument(index); auto inputType = mlir::dyn_cast<RankedTensorType>(arg.getType()); // Only rank 1 tensor is supported for now. if (!inputType || inputType.getRank() != 1) continue; auto shape = llvm::to_vector<4>(inputType.getShape()); llvm::SmallVector<int64_t, 4> bounds(shape.begin(), shape.end()); // Mark the dim as dynamic dim.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
if (!module_op) return mlir::failure(); llvm::SmallVector<XlaArgument, 4> xla_arguments; auto args_status = ParseXlaArguments( mlir::StringRefToView(input_shapes), mlir::StringRefToView(input_dtypes), mlir::StringRefToView(input_types), xla_arguments); if (!args_status.ok()) { LOG(ERROR) << args_status; return mlir::failure(); } XlaCompilationResult compilation_result; auto compilation_status =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0) -
platforms/core-configuration/model-core/src/integTest/groovy/org/gradle/api/provider/PropertyAssignmentIntegrationTest.groovy
def initValue = inputType.contains("Map<") ? "[:]" : "[]" def inputDeclaration = "$inputType input = $initValue" groovyBuildFile(inputDeclaration, inputValue, operation) expect: runAndAssert("myTask", expectedResult) where:
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Dec 28 14:39:49 UTC 2023 - 36.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// Quantizes the input model represented as `model_buffer` and writes the result // to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a // valid FlatBuffer format for Model supported by TFLite. // // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 / // int8 / int16. // // Returns a partially quantized model if `fully_quantize` is false. Returns a // non-OK status if the quantization fails. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
<< ", input_inference_type: " << tflite::EnumNameTensorType(input_type) << ", output_inference_type: " << tflite::EnumNameTensorType(output_type) << "\n"; mlir::Builder mlir_builder(&context); mlir::Type input_mlir_type = tflite::ConvertElementType(input_type, mlir_builder); mlir::Type output_mlir_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
subprojects/core/src/main/java/org/gradle/api/internal/initialization/transform/ExternalDependencyInstrumentingArtifactTransform.java
File input = getInput().get().getAsFile(); InstrumentationInputType inputType = getInputType(input); switch (inputType) { case DEPENDENCY_ANALYSIS_DATA: doOutputTransformedFile(input, outputs); return; case ORIGINAL_ARTIFACT:
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 18 15:08:33 UTC 2024 - 4.4K bytes - Viewed (0) -
platforms/software/dependency-management/src/main/java/org/gradle/api/internal/artifacts/ivyservice/ivyresolve/ComponentSelectionRulesProcessor.java
} List<Object> inputs = new ArrayList<>(inputTypes.size()); for (Class<?> inputType : inputTypes) { if (inputType == ComponentMetadata.class) { inputs.add(metadataProvider.getComponentMetadata()); continue; } if (inputType == IvyModuleDescriptor.class) { inputs.add(metadataProvider.getIvyModuleDescriptor());
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Oct 10 21:10:11 UTC 2023 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_kernel_creator_test.cc
EXPECT_EQ("XTimesY", kernel_->name()); EXPECT_EQ("XTimesY", kernel_->type_string()); EXPECT_EQ(2, kernel_->num_inputs()); EXPECT_EQ(DT_FLOAT, kernel_->input_type(0)); EXPECT_EQ(DT_RESOURCE, kernel_->input_type(1)); EXPECT_EQ(DEVICE_MEMORY, kernel_->input_memory_types()[0]); EXPECT_EQ(HOST_MEMORY, kernel_->input_memory_types()[1]); EXPECT_EQ(1, kernel_->num_outputs());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 16 01:39:55 UTC 2023 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/group_by_dialect.cc
} std::vector<Value> inputs; std::vector<Value> outputs; computeInputsOutputs(ops, &inputs, &outputs); std::vector<Type> input_types; std::vector<Type> output_types; input_types.reserve(inputs.size()); for (Value v : inputs) { input_types.push_back(v.getType()); } output_types.reserve(outputs.size()); for (Value v : outputs) { output_types.push_back(v.getType()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 17 07:31:01 UTC 2023 - 8K bytes - Viewed (0)