- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 78 for getDefiningOp (0.29 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.cc
value_select.getOperand(0).getDefiningOp()); if (!value_gt || value_gt.getComparisonDirection() != compare_direction_included || value_gt.getLhs() != body.getArgument(0) || value_gt.getRhs() != body.getArgument(2)) return failure(); mhlo::SelectOp index_select = llvm::dyn_cast_or_null<mhlo::SelectOp>( return_op.getOperand(1).getDefiningOp()); if (!index_select) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 20:53:17 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc
SmallVector<ElementsAttr> inputs; for (auto operand : op->getOperands()) { auto preceding_const_op = operand.getDefiningOp<TF::ConstOp>(); if (preceding_const_op) { inputs.push_back(preceding_const_op.getValue()); continue; } Operation* preceding_op = operand.getDefiningOp(); int preceding_result_id = -1; for (auto preceding_result : preceding_op->getResults()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_avg_pool_patterns.td
// See the function doc in the header file. def GetPadOpType : NativeCodeCall< "GetPadOpType((*$0.begin()).getDefiningOp<mhlo::CompositeOp>())">; // See the function doc in the header file. def GetAvgPoolOpPadAttr: NativeCodeCall<"GetAvgPoolOpPadAttr($_builder, (*$0.begin()).getDefiningOp<mhlo::CompositeOp>())">; // Returns true if the provided padding in the composite op can *not* be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:16:05 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc
Value rhs = mul_op.getRhs(); conv_op = lhs.getDefiningOp<mhlo::ConvolutionOp>(); if (conv_op == nullptr) { return failure(); } filter = conv_op.getRhs().getDefiningOp<mhlo::ConstantOp>(); if (filter == nullptr) { return failure(); } // Try to match static broadcast or dynamic broadcast. bcast_or_const_op = rhs.getDefiningOp(); bool is_dynamic_broadcast =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 22:21:19 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc
TF::TPUReplicatedInputOp replicated_input) { if (!llvm::all_of(replicated_input.getInputs(), [](Value input) { return llvm::isa_and_nonnull<TF::TPUPartitionedInputV2Op>( input.getDefiningOp()); })) return replicated_input.emitOpError() << "expects all inputs from 'tf.TPUPartitionedInputV2' ops"; const auto metadata_iter =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 24 23:08:55 UTC 2023 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_patterns.td
def NoFallbackAttrNotSet : Constraint<CPred< "!$0.getDefiningOp()->hasAttr(kNoFallbackAttr)">>; class FloatValueEquals<string val> : Constraint<CPred< "FloatValueEquals($0, " # val # ")">>; class RankEquals<string rank> : Constraint<CPred< "RankEquals($0, " # rank # ")">>; def IsFusibleWithBias : Constraint<CPred< "IsFusibleWithBiasOp($0.getDefiningOp())">>; // Folds TF IdentityOp with constant input.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/optimize.td
include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td" include "mhlo/IR/hlo_ops.td" include "stablehlo/dialect/ChloOps.td" def IsDefinedByConvOrDotGeneralOp : Constraint<Or<[ CPred<"$0.getDefiningOp<mhlo::ConvolutionOp>()">, CPred<"$0.getDefiningOp<mhlo::DotGeneralOp>()">]>>; def IsNull : Constraint<CPred<"!$0">>; // This pattern optimizes: // conv/dot_general + a + b -> conv/dot_general + (a + b)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 24 02:26:47 UTC 2024 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc
bool NotFromDequant(mlir::Value value) { auto dequant_op = value.getDefiningOp<DequantizeOp>(); if (dequant_op) { return false; } auto split_op = value.getDefiningOp<SplitOp>(); if (!split_op) { return true; } return !split_op.getValue().getDefiningOp<DequantizeOp>(); } // Optimize TFLite operations in functions. class OptimizeBatchMatmulPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
// `tf_quant.composite_function` attribute. def IsNotInLiftedFunc : Constraint<CPred<"!IsInLiftedFunc($0.getDefiningOp())">>; // Checks if the value is not inside a StableHLO op with region. def IsNotInStableHloOpRegion : Constraint<CPred<"!IsInStableHloOpRegion($0.getDefiningOp())">>; // Checks if the given einsum op is supported for XlaDotV2 quantization. def IsEinsumSupportedByXlaDotV2 :
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
batch_norm = dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getX().getDefiningOp()); if (batch_norm) { side_input = add_op.getY(); } else { // Didn't get a FusedBatchNorm on the LHS of the AddV2, try the RHS. batch_norm = dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getY().getDefiningOp()); if (!batch_norm) return failure(); side_input = add_op.getX(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.2K bytes - Viewed (0)