- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 17 for Axis (0.04 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir
// CHECK-DAG: %[[ITEMS1_3:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#3, %[[AXIS]]) // CHECK-DAG: %[[ITEMS1_2:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#2, %[[AXIS]]) // CHECK-DAG: %[[ITEMS1_1:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#1, %[[AXIS]]) // CHECK-DAG: %[[ITEMS1_0:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#0, %[[AXIS]]) // CHECK-DAG: %[[ITEMS0_0:.*]] = "tf.ExpandDims"(%[[ITEMS0]], %[[AXIS]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 92K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
// to expand at the given `axis`. Type InferExpandDimsType(Type ty, int64_t axis, Builder *builder) { auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty); // Unranked type. if (!ranked_ty) return ty; auto shape = llvm::to_vector<4>(ranked_ty.getShape()); if (axis < 0) axis += ranked_ty.getRank() + 1; shape.insert(shape.begin() + axis, 1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/unroll-batch-matmul.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 63.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// Eliminate cumulative summations if the input's dimension in axis is 1. def EliminateCumSumInclusive : Pat< (TFL_CumsumOp $input, (Arith_ConstantOp I32ElementsAttr:$axis), ConstBoolAttrFalse, $reverse), (replaceWithValue $input), [(AreInputDimensionsOneInAxes $input, $axis)]>; // Fusing raw computation of GELU op into one native tfl_gelu op. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir
// CHECK: %17 = "tfl.gather"(%arg13, %14) <{axis = 0 : i32, batch_dims = 0 : i32}> {tac.device = "DARWINN", tac.inference_type = "FLOAT"} : (tensor<5xi32>, tensor<?xi32>) -> tensor<?xi32> // CHECK: %18 = tfl.add %arg14, %17 {fused_activation_function = "NONE", tac.device = "DARWINN", tac.inference_type = "FLOAT"} : tensor<?xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
bool reverse; TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "reverse", &reverse)); attrs.reverse_ = !reverse; auto axis = op.input(1); auto sum = Cumsum(scope, grad_inputs[0], axis, attrs); grad_outputs->push_back(sum.out); grad_outputs->push_back(NoGradient()); return scope.status(); } REGISTER_GRADIENT_OP("Cumsum", CumsumGrad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
// In above calculation, they are replaced by new values. These new mean and // variance are calculated as following: // new_mean = mean(x, axis=[0, 1, 2]) // new_variance = mean(squared_difference(x, new_mean), axis=[0, 1, 2]) // // The DDR rule for the is_training equals true case is as following: // def : Pattern< // (TF_FusedBatchNormV3Op:$root
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
mlir::IntegerAttr axis; if (mins.size() > 1) { llvm::SmallVector<int64_t, 4> axis_stats_shape{ static_cast<int64_t>(mins.size()), 2}; axis_stats = mlir::DenseFPElementsAttr::get( tensorflow::GetTypeFromTFTensorShape(axis_stats_shape, b.getF32Type()), min_maxs); // TODO(fengliuai): this quantization dimension isn't correct.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0)