- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 1,343 for ireduce (0.2 sec)
-
platforms/core-configuration/kotlin-dsl/src/test/kotlin/org/gradle/kotlin/dsl/execution/PartialEvaluatorTest.kt
private fun partialEvaluationOf( program: Program, programKind: ProgramKind, programTarget: ProgramTarget ): ResidualProgram = PartialEvaluator(programKind, programTarget).reduce(program) private fun isResidualProgram(program: ResidualProgram) = equalTo(program)
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed Aug 02 08:06:49 UTC 2023 - 38.9K bytes - Viewed (0) -
src/crypto/internal/boring/build-goboring.sh
__umodti3: # specialized to u128 % u64, so verify that test %rcx,%rcx jne 1f # save divisor movq %rdx, %r8 # reduce top 64 bits mod divisor movq %rsi, %rax xorl %edx, %edx divq %r8 # reduce full 128-bit mod divisor # quotient fits in 64 bits because top 64 bits have been reduced < divisor. # (even though we only care about the remainder, divq also computes
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jan 26 22:52:27 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
out_dims); } Value reduced = builder.create<TF::EinsumOp>( loc, RankedTensorType::get(output_shape, builder.getIntegerType(32)), input_arguments, builder.getStringAttr(einsum_equation)); reduced.getDefiningOp()->setAttr( kTfQuantCreatedEinsum, BoolAttr::get(reduced.getDefiningOp()->getContext(), true));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/error_util.h
namespace mlir { // TensorFlow's Status is used for error reporting back to callers. using ::tensorflow::Status; // TF customized diagnostic handler that collects all the diagnostics reported // and can produce a Status to return to callers. This is for the case where // MLIR functions are called from a function that will return a Status: MLIR // code still uses the default error reporting, and the final return function
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.9K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
// [1] auto reduced = Cast(cpu_scope, reduction_indices_pos, DataType::DT_INT32); // [0, 1, 2] auto idx = Range(cpu_scope, zero, rank, one); // [0, 2] auto other = SetDiff1D(cpu_scope, idx, reduced).out; // [1, 0, 2] auto perm = Concat(cpu_scope, std::initializer_list<Input>{reduced, other}, 0); // 3 => [3]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
platforms/core-configuration/kotlin-dsl/src/main/kotlin/org/gradle/kotlin/dsl/execution/PartialEvaluator.kt
enum class ProgramTarget { Project, Settings, Gradle } /** * Reduces a [Program] into a [ResidualProgram] given its [kind][ProgramKind] and [target][ProgramTarget]. */ internal class PartialEvaluator( private val programKind: ProgramKind, private val programTarget: ProgramTarget ) { fun reduce(program: Program): ResidualProgram = when (program) {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed Aug 02 08:06:49 UTC 2023 - 7.5K bytes - Viewed (0) -
src/math/log.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
namespace mlir { namespace TFL { namespace { #define GEN_PASS_DEF_OPTIMIZEOPORDERPASS #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc" // Dequantize ops will produce 3x larger tensors, so we want to move it after // some passthrough ops to reduce the memory consumption. struct PushDownDequantize : public OpRewritePattern<DequantizeOp> { explicit PushDownDequantize(MLIRContext* context)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir
%1 = mhlo.constant dense<false> : tensor<i1> %2 = mhlo.constant dense<0> : tensor<i32> %3:2 = mhlo.reduce(%arg0 init: %1), (%0 init: %2) across dimensions = [0] : (tensor<2xi1>, tensor<2xi32>, tensor<i1>, tensor<i32>) -> (tensor<i1>, tensor<i32>) reducer(%arg1: tensor<i1>, %arg3: tensor<i1>) (%arg2: tensor<i32>, %arg4: tensor<i32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 40.1K bytes - Viewed (0) -
tensorflow/cc/gradients/linalg_grad.cc
if (!has_repeated_labels && input_subs_without_reduced_labels == output_subs) { // Obtain the shape of the output, as if keepdims=True on reduce sum. E.g. // for the equation "abcd->ac" with input shape [2,5,3,4], we get the // reduced shape [2,1,3,1]. auto reduced_shape = ReducedShapeHelper(scope, input_shape, reduced_axes); // Reshaping the gradient (wrt "ac") to [2,1,3,1] and broadcasting it to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 20.4K bytes - Viewed (0)