- Sort Score
- Results per page: 10
- Languages All
Results 41 - 49 of 49 for do (0.23 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc
} return scale_spec; } bool IsOpQuantizableStableHlo(Operation* op) { if (isa<func::ConstantOp, mlir::stablehlo::ConstantOp>(op)) { // Constant ops do not have QuantizableResult attribute but can be // quantized. return true; } else if (op->hasTrait<OpTrait::IsTerminator>() || isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.cc
llvm::ArrayRef<TensorType> underlying_type = mlir::cast<TF::ResourceType>(arg_type.getElementType()).getSubtypes(); // If the arg type already matches the global_tensor type, we don't need // to do anything. if (!underlying_type.empty() && underlying_type[0] == global_tensor.getType()) { assert(underlying_type.size() == 1); continue; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 23 09:05:47 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/split_into_island_per_op_pass.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" // This pass is used in preparation for Graph export. // The GraphDef exporter expects each op to be in its own island. // This pass puts the IR in that form. // // We do this as an IR->IR transform to keep the Graph exporter as simple as // possible. namespace mlir { namespace TF { namespace { #define GEN_PASS_DEF_SPLITINTOISLANDPEROPPASS
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 17 07:31:01 UTC 2023 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/xla_rewrite.cc
non_resource_args.push_back(arg); if (has_resources) in_order = false; } else { resource_args.push_back(arg); has_resources = true; } } if (!in_order) { // Functions do not get reused in practice, so skip the check for if the // callee has been updated. StringAttr callee_sym = cluster_func_op.getFuncAttr().getAttr(); MoveResourceArgsToEnd(symtab.lookup<func::FuncOp>(callee_sym)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/unwrap_xla_call_module_op.cc
explicit UnwrapXlaCallModuleOpPass() = default; private: void runOnOperation() override; }; void UnwrapXlaCallModuleOp(TF::XlaCallModuleOp call_op, SymbolTable& symbol_table) { // Do not inline lifted quantized functions used for fusing patterns. // TODO - b/310539922: Remove reference to TF/TFL utils. if (call_op->hasAttr(kQuantTraitAttrName)) { return; } auto function_name = call_op
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/jit/cluster_scoping_pass.cc
starts.push_back(start); auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); }; DFSFrom(*graph_, starts, enter, /*leave=*/nullptr, /*stable_comparator=*/NodeComparatorName(), // Do not filter any edges to better capture the semantics of // transitive closure of successors. We may revisit this when // we see more cases needing cluster scoping in the future.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc
// Policy: // // Disable constant folding if operands size is greater than a certain // threshold (`kOperandsSizeThreshold`). // // Otherwise, allow folding if we do not know the shape of an operand or // result i.e., one of these values has non-static shape. If we know all the // shapes, find the total size of the operands and results. Folding of the op is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc
const std::optional<::llvm::StringRef> op_xla_sharding = partitioned_input.get_XlaSharding(); const auto op_partition_dims = partitioned_input.getPartitionDims(); // Abort if TPUPartitionedInputV2(s) do not have the same attributes. if (!llvm::equal(partition_dims, op_partition_dims)) { return partitioned_input->emitOpError() << "expects partition_dims = " << partition_dims << " but found "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 24 23:08:55 UTC 2023 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_tensor_helper.cc
int64_t &common_dim = common_dims[i]; if (common_dim == ShapedType::kDynamic) { common_dim = dim; } else if (common_dim != dim) { // If mask_one_dim is true, do not emit an error if this is the only // dimension with mismatches. Note down the dimension to mask it from // the following types. if (mask_one_dim && dim_to_mask == ShapedType::kDynamic) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.7K bytes - Viewed (0)