Results 51 - 60 of 118 for output_types (0.15 sec)

  1. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

                                                  RankedTensorType output_type,
                                                  int64_t axis) {
      const auto outer_dims = output_type.getShape().take_front(axis);
      const int64_t outer_size = std::accumulate(
          outer_dims.begin(), outer_dims.end(), 1, std::multiplies<int64_t>());
    
      const auto base_inner_dims = output_type.getShape().drop_front(axis + 1);
      const int64_t base_inner_size =
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
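
The tfl_ops.cc excerpt above folds the dimensions on either side of axis into single element counts. A minimal standalone sketch of the same arithmetic, assuming a plain std::vector<int64_t> in place of the MLIR RankedTensorType (iterator ranges stand in for take_front/drop_front):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Product of the dimensions in [first, last); an empty range yields 1,
    // so a missing prefix or suffix contributes no scaling.
    int64_t DimProduct(std::vector<int64_t>::const_iterator first,
                       std::vector<int64_t>::const_iterator last) {
      return std::accumulate(first, last, int64_t{1}, std::multiplies<int64_t>());
    }

    int main() {
      const std::vector<int64_t> shape = {2, 3, 4, 5};  // hypothetical output shape
      const int64_t axis = 2;
      // outer_size: dims before `axis` (the excerpt's take_front(axis)).
      const int64_t outer_size = DimProduct(shape.begin(), shape.begin() + axis);
      // base_inner_size: dims after `axis` (the excerpt's drop_front(axis + 1)).
      const int64_t base_inner_size =
          DimProduct(shape.begin() + axis + 1, shape.end());
      std::cout << outer_size << " " << base_inner_size << "\n";  // prints: 6 5
      return 0;
    }
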
  2. build/pause/Makefile

    	docker buildx build --provenance=false --sbom=false --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/$(ARCH) \
    		-t $(IMAGE):$(TAG)-${OS}-$(ARCH) --build-arg BASE=${BASE} --build-arg ARCH=$(ARCH) .
    	touch $@
    
    .container-windows-$(ARCH): $(foreach binary, ${BIN}, bin/${binary}-${OS}-${ARCH})
    	docker buildx build --provenance=false --sbom=false --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/$(ARCH) \
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Thu May 23 19:31:40 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

        // before broadcasting.
        if (operand_type.getRank() < output_type.getRank()) {
          input = InsertExpandDimsOp(op, rewriter, input, output_type.getRank());
        }
    
        SmallVector<int32_t> broadcast_shape =
            CastI64ArrayToI32(output_type.getShape()).value();
        TensorType broadcast_shape_type =
            output_type.cloneWith({output_type.getRank()}, rewriter.getI32Type());
        auto broadcast_shape_attr =
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
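
In the snippet above, the pass narrows the 64-bit output shape to 32-bit values before building the broadcast shape tensor; the .value() call suggests CastI64ArrayToI32 returns an optional that is empty when a dimension does not fit, though that behavior is assumed here rather than taken from the pass itself. A dependency-free sketch of that kind of checked narrowing:

    #include <cstdint>
    #include <limits>
    #include <optional>
    #include <vector>

    // Narrow each 64-bit dimension to 32 bits, failing instead of silently
    // truncating when a value is out of range.
    std::optional<std::vector<int32_t>> CastI64VectorToI32(
        const std::vector<int64_t>& dims) {
      std::vector<int32_t> result;
      result.reserve(dims.size());
      for (int64_t d : dims) {
        if (d < std::numeric_limits<int32_t>::min() ||
            d > std::numeric_limits<int32_t>::max()) {
          return std::nullopt;  // dimension does not fit in 32 bits
        }
        result.push_back(static_cast<int32_t>(d));
      }
      return result;
    }

    int main() {
      auto ok = CastI64VectorToI32({1, 128, 128, 3});  // has_value() == true
      auto bad = CastI64VectorToI32({1LL << 40});      // has_value() == false
      return ok.has_value() && !bad.has_value() ? 0 : 1;
    }
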
  4. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

          auto *value_op = assign_variable_op.getValue().getDefiningOp();
          auto dq_op = dyn_cast_or_null<DequantizeOp>(value_op);
          if (dq_op) {
            Type output_type = dq_op.getInput().getType();
            auto qtype = quant::QuantizedType::getQuantizedElementType(output_type);
            if (qtype == quant::QuantizedType::getQuantizedElementType(ref_qtype)) {
              // Same quantization parameters, remove it.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
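
The quantize_variables.cc hit hinges on a null-tolerant downcast: take the value's defining op (which may be null), try to view it as a DequantizeOp, and only then compare quantized element types. MLIR's dyn_cast_or_null comes from LLVM's casting support rather than C++ RTTI, but the closest plain-C++ analogue is a null-checked dynamic_cast; a small sketch with made-up op classes:

    #include <iostream>

    // Made-up stand-ins for MLIR ops; only the downcast pattern matters here.
    struct Op { virtual ~Op() = default; };
    struct DequantizeOp : Op { int quantized_element_type = 8; };
    struct AddOp : Op {};

    // Analogue of dyn_cast_or_null: a null input or a failed cast both yield
    // null, so the caller can branch with a single if.
    template <typename T>
    T* DynCastOrNull(Op* op) {
      return op ? dynamic_cast<T*>(op) : nullptr;
    }

    int main() {
      DequantizeOp dq;
      AddOp add;
      Op* defining_ops[] = {nullptr, &add, &dq};
      for (Op* value_op : defining_ops) {
        if (auto* dq_op = DynCastOrNull<DequantizeOp>(value_op)) {
          std::cout << "dequantize, element type "
                    << dq_op->quantized_element_type << "\n";
        } else {
          std::cout << "not a dequantize\n";
        }
      }
      return 0;
    }
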
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h

    // to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a
    // valid FlatBuffer format for Model supported by TFLite.
    //
    // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 /
    // int8 / int16.
    //
    // Returns a partially quantized model if `fully_quantize` is false. Returns a
    // non-OK status if the quantization fails.
    //
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 2.8K bytes
    - Viewed (0)
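
The header comment documents a buffer-in/buffer-out quantizer: it reads a serialized TFLite model from model_buffer, writes the quantized result to output_buffer, accepts float32 / qint8 / int8 / int16 for the input, output, and inference types, and produces a partially quantized model when fully_quantize is false. The sketch below only illustrates how a caller might plumb those buffers; QuantizeFlatBuffer is a hypothetical stand-in with a stub body, not the declaration from quantize_model.h.

    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical stand-in for the entry point the header comment describes;
    // the real declaration (and the supported element types) is in
    // quantize_model.h. The stub body only copies the buffer so the sketch
    // compiles and runs.
    bool QuantizeFlatBuffer(const std::string& model_buffer, bool fully_quantize,
                            std::string& output_buffer) {
      (void)fully_quantize;  // false would mean a partially quantized model
      output_buffer = model_buffer;
      return true;
    }

    int main() {
      // Both buffers are expected to hold valid TFLite FlatBuffer models.
      std::ifstream in("model.tflite", std::ios::binary);
      std::stringstream ss;
      ss << in.rdbuf();
      const std::string model_buffer = ss.str();

      std::string output_buffer;
      if (!QuantizeFlatBuffer(model_buffer, /*fully_quantize=*/true,
                              output_buffer)) {
        std::cerr << "quantization failed\n";
        return 1;
      }

      std::ofstream out("model_quant.tflite", std::ios::binary);
      out.write(output_buffer.data(),
                static_cast<std::streamsize>(output_buffer.size()));
      return 0;
    }
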
  6. tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc

          flow_type.getRank() != 4) {
        return func_.emitWarning() << "Flow should be a 4D float tensor";
      }
    
      auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(
          func_.getFunctionType().getResult(0));
      if (!output_type || !output_type.getElementType().isF32() ||
          output_type.getRank() != 4) {
        return func_.emitWarning() << "Output should be a 4D float tensor";
      }
    
      return success();
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 8.9K bytes
    - Viewed (0)
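
Diagnostics aside, the perception_ops_utils.cc check reduces to: the first result type must be present, ranked, rank 4, and carry f32 elements (the tftext_utils.cc hit below applies the same pattern to a 2D case). A dependency-free sketch of that validation, with RankedTensorSpec as an illustrative stand-in for the MLIR tensor type:

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    // Illustrative stand-ins for the MLIR types queried in the excerpt.
    enum class ElementType { kF32, kI32, kI8 };

    struct RankedTensorSpec {
      ElementType element_type;
      std::vector<int64_t> shape;  // one entry per dimension
      int64_t rank() const { return static_cast<int64_t>(shape.size()); }
    };

    // Mirrors the excerpt's guard: reject a missing result type, a non-f32
    // element type, or a rank other than 4.
    std::optional<std::string> ValidateOutput(
        const std::optional<RankedTensorSpec>& output_type) {
      if (!output_type || output_type->element_type != ElementType::kF32 ||
          output_type->rank() != 4) {
        return "Output should be a 4D float tensor";
      }
      return std::nullopt;  // success
    }

    int main() {
      RankedTensorSpec ok{ElementType::kF32, {1, 32, 32, 3}};
      RankedTensorSpec bad{ElementType::kF32, {1, 32, 32}};
      std::cout << (ValidateOutput(ok) ? "warn" : "ok") << "\n";   // ok
      std::cout << (ValidateOutput(bad) ? "warn" : "ok") << "\n";  // warn
      return 0;
    }
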
  7. tensorflow/compiler/mlir/lite/utils/tftext_utils.cc

               << "'hash_seed' attribute is not set or not an array";
      }
      auto output_type = GetResultType(func, 0);
      if (!output_type || !mlir::isa<FloatType>(output_type.getElementType()) ||
          !RankEquals(output_type, 2)) {
        return func.emitError() << "Output should be a 2D float tensor.";
      }
      if (output_type.getDimSize(1) != hash_seed.size()) {
        return func.emitError()
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc

          for (Type& type : llvm::make_range(&state_->types[original_size],
                                             state_->types.end())) {
            Type output_type;
            TF_RETURN_IF_ERROR(AddRef(type, &output_type));
            type = output_type;
          }
        }
      }
      for (auto& it : attrs_) state_->addAttribute(it.first(), it.second);
      *state = state_.get();
      return absl::OkStatus();
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

                   << ", output_inference_type: "
                   << tflite::EnumNameTensorType(output_type) << "\n";
      mlir::Builder mlir_builder(&context);
      mlir::Type input_mlir_type =
          tflite::ConvertElementType(input_type, mlir_builder);
      mlir::Type output_mlir_type =
          tflite::ConvertElementType(output_type, mlir_builder);
    
      if (fully_quantize) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

        if (!input_dequant) return failure();
    
        if (!IsQI32Type(input_dequant.getType())) return failure();
    
        auto output_type =
            mlir::dyn_cast_or_null<ShapedType>(dequant_op.getOutput().getType());
        if (!output_type || !output_type.getElementType().isF32()) return failure();
    
        auto input_type = mlir::dyn_cast<ShapedType>(input_dequant.getType());
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)