- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 201 for broadcasts (0.16 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
constexpr StringRef kEntryFuncAttrName = "_entry_function"; // Returns broadcasted user op of an input op. Returns null if // the op is not broadcasted or not the intended type. // Supports both static broadcast and dynamic broadcast. // Note that the patterns below differ from lifted patterns as // ShapeLegalizeToHloPass is ran prior to running this pass. // // Dynamically broadcasted bias due to unknown input batch size
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// Rewire the output. rewriter.replaceOp(op, {nullptr, rnn_result}); return success(); } }; // Put two TFL BroadcastTo ops in front of the given TF binary broadcast op // to make binary broadcast-able op conversion always successful and does not // require flex delegate. template <typename SourceOp> class ApplyExplicitBroadcasting : public OpRewritePattern<SourceOp> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
return failure(); } lhs = rewriter.create<mhlo::BitcastConvertOp>(op->getLoc(), *lhs_quant_type, adaptor.getLhs()); // rhs (bias) is always 1D that broadcasts to the last dim of lhs. auto broadcast_dims = rewriter.getDenseI64ArrayAttr({lhs_type.getRank() - 1}); auto rhs_type = GetUniformQuantizedType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
return std::make_pair(std::move(broadcasted_input_shape), std::move(broadcasted_weight_shape)); } // Broadcasts batch dimensions of the input and weight of the BatchMatMul // op. In XLA, shapes are all constants, so all operations created in this // function, except BroadcastTo, are expected to be folded. void BroadcastBatchDimensionsForBatchMatMul(OpBuilder &builder, Location loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
platforms/core-runtime/messaging/src/test/groovy/org/gradle/internal/event/DefaultListenerManagerInServiceRegistryTest.groovy
} }) def broadcast1 = listenerManager.getBroadcaster(TestListener) then: 0 * _ when: broadcast1.something("12") then: 1 * created.run() 1 * listener.something("12") 0 * _ when: def broadcast2 = listenerManager.getBroadcaster(DifferentListener)
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon May 27 12:34:44 UTC 2024 - 12.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/fold-broadcast.mlir
// RUN: tf-opt -tf-broadcast-fold %s | FileCheck %s // CHECK-LABEL: @broadcast_mul0 func.func @broadcast_mul0(%arg0: tensor<5x7xf32>, %arg1: tensor<7xf32>) -> tensor<5x7xf32> { %cst = arith.constant dense<[5, 7]> : tensor<2xi32> %0 = "tf.BroadcastTo"(%arg1, %cst) : (tensor<7xf32>, tensor<2xi32>) -> tensor<5x7xf32> %1 = "tf.Mul"(%arg0, %0) : (tensor<5x7xf32>, tensor<5x7xf32>) -> tensor<5x7xf32> func.return %1 : tensor<5x7xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.6K bytes - Viewed (0) -
platforms/ide/tooling-api/src/main/java/org/gradle/tooling/internal/consumer/parameters/BuildProgressListenerAdapter.java
import static java.util.Collections.emptyList; /** * Converts progress events sent from the tooling provider to the tooling client to the corresponding event types available on the public Tooling API, and broadcasts the converted events to the * matching progress listeners. This adapter handles all the different incoming progress event types (except the original logging-derived progress listener). */
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue May 28 09:03:53 UTC 2024 - 67.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
// This differs from a normal TF::AddOp with respect to how the data_format // is handled, which can optionally require a general broadcast of the // 'bias' term in a way that is not compatible with the standard left-padded // broadcast semantics (i.e. NCHW will broadcast into dimension 1). // The correct 'bias' broadcast will be synthesized manually. class ConvertBiasAddOp : public OpRewritePattern<TF::BiasAddOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
platforms/core-runtime/messaging/src/main/java/org/gradle/internal/event/DefaultListenerManager.java
synchronized (lock) { EventBroadcast<T> broadcaster = Cast.uncheckedCast(broadcasters.get(listenerClass)); if (broadcaster == null) { broadcaster = new EventBroadcast<T>(listenerClass); broadcasters.put(listenerClass, broadcaster); for (ListenerDetails listener : allListeners.values()) { broadcaster.maybeAdd(listener); }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 11 10:09:43 UTC 2024 - 18.3K bytes - Viewed (0) -
platforms/software/dependency-management/src/test/groovy/org/gradle/api/internal/artifacts/configurations/DefaultConfigurationSpec.groovy
config.resolve() then: parent.observedState == ConfigurationInternal.InternalState.GRAPH_RESOLVED } def "resolving configuration puts it into the right state and broadcasts events"() { def listenerBroadcaster = Mock(AnonymousListenerBroadcast) def listener = Mock(DependencyResolutionListener) def config when: config = conf("conf")
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu May 23 17:30:13 UTC 2024 - 64.8K bytes - Viewed (0)