- Sort Score
- Results per page: 10
- Languages All
Results 71 - 80 of 104 for lowerings (0.69 sec)
-
src/cmd/compile/internal/ssa/rewriteCond_test.go
one64 int64 = 1 one32 int32 = 1 v64 int64 = 11 // ensure it's not 2**n +/- 1 v64_n int64 = -11 v32 int32 = 11 v32_n int32 = -11 uv32 uint32 = 19 uz uint8 = 1 // for lowering to SLL/SRL/SRA ) var crTests = []struct { name string tf func(t *testing.T) }{ {"AddConst64", testAddConst64}, {"AddConst32", testAddConst32}, {"AddVar64", testAddVar64},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 24 01:19:09 UTC 2023 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir
// ----- // CHECK-LABEL: func @random_uniform_simple func.func @random_uniform_simple(%arg0: tensor<3xi32>) -> tensor<12x?x64xf32> { // expected-remark@+1 {{lowering requires operand #0 to be a constant}} %0 = "tf.RandomUniform"(%arg0) {device = "", seed = 0 : i64, seed2 = 0 : i64} : (tensor<3xi32>) -> tensor<12x?x64xf32> func.return %0 : tensor<12x?x64xf32> } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 15.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
float half_range = is_unsigned ? 0 : 128; return GetScalarOfType(builder->getF32Type(), half_range); } // Returns reduction indices to use while lowering tf.BiasAddGrad op to tf.Sum // op. DenseIntElementsAttr GetBiasAddGradReductionIndices(int64_t rank, StringAttr data_format,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc
auto ranked_ty = mlir::dyn_cast<ShapedType>(ty); // Only bounded operands are supported in the XLA builders. if (!IsBounded(ranked_ty)) { return op_->emitRemark() << "lowering requires bounded tensor operands " << ranked_ty; } } if (HasSymbolRefAttr(op_)) { return op_->emitRemark() << "ops with symbol references are not supported"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 18.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
bool preserve_assert_op) { this->run_tfl_runtime_verification_ = run_tfl_runtime_verification; this->preserve_assert_op_ = preserve_assert_op; } /// Performs the lowering to TFLite dialect. void runOnOperation() override; }; // Util that casts 'val' to Int32 by adding a cast Op. Value CreateCastToInt32(Value val, Location loc, PatternRewriter& rewriter) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/sccp.go
constValue.reset(OpInvalid) return lattice{bottom, nil} } func (t *worklist) visitValue(val *Value) { if !possibleConst(val) { // fast fail for always worst Values, i.e. no lowering happens on them, so their lattices must initially be the worst, Bottom. return } oldLt := t.getLatticeCell(val) defer func() { // re-visit all uses of value if its lattice is changed
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jan 22 16:54:50 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc
TypeID::get<TF::AssertOp>(), // TF2XLA fallback pattern doesn't support these op as MLIR hlo builder // doesn't override the necessary builder methods. These ops have simple // lowering pattern so this should be safe. TypeID::get<TF::CrossReplicaSumOp>(), TypeID::get<TF::InfeedDequeueTupleOp>(), TypeID::get<TF::OutfeedEnqueueTupleOp>(), TypeID::get<TF::XlaShardingOp>(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 21.7K bytes - Viewed (0) -
src/cmd/compile/internal/dwarfgen/dwinl.go
// In the top-level case (ii=0) this can happen // because a composite variable was split into pieces, // and we're looking at a piece. We can also see // return temps (~r%d) that were created during // lowering, or unnamed params ("_"). v.ChildIndex = int32(synthCount) synthCount++ } } } // Make a second pass through the progs to compute PC ranges for // the various inlined calls.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 20:45:07 UTC 2024 - 12.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc
remat.RunGreedyAlgorithm(/*max_cost=*/1, /*max_block_length=*/1, /*min_savings=*/1); // Only a single remat is done -- this will be the best one possible, // lowering the profile by 8 instead of the maximum 1 + 2 + 4 + 8. EXPECT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1})); } TEST_F(GreedyRematTest, SimpleForbiddenOps) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 14 20:57:44 UTC 2023 - 19.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
//===----------------------------------------------------------------------===// // Inv op patterns. //===----------------------------------------------------------------------===// def LowerInv : Pat<(TF_InvOp $x), (TF_ReciprocalOp $x)>; //===----------------------------------------------------------------------===// // Inf op patterns.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0)