- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 225 for broadcasts (0.39 sec)
-
tensorflow/c/eager/parallel_device/parallel_device.cc
// Non-parallel tensors from _EagerConst/tf.constant are implicitly // broadcast, i.e. set as the input to each parallel operation. This // allows code like "tf.constant(1.)" or "tf.reduce_sum(..., axis=1)" // (where the value starts on the host), without allowing other implicit // copies/broadcasts. Other implicit copies may be supported eventually,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 29 22:05:31 UTC 2023 - 18.3K bytes - Viewed (0) -
src/main/java/jcifs/netbios/NbtAddress.java
*/ public static final int P_NODE = 1; /** * Try Broadcast queries first, then try to resolve the name using the * nameserver. */ public static final int M_NODE = 2; /** * A Hybrid node tries to resolve a name using the nameserver first. If * that fails use the broadcast address. This is the default if a nameserver
Registered: Wed Jun 12 15:45:55 UTC 2024 - Last Modified: Sun Jul 01 13:12:10 UTC 2018 - 15.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
} def XlaBroadcastPass : Pass<"tf-xla-broadcast", "mlir::func::FuncOp"> { let summary = "Moves a broadcast from host into XLA, encoded as XlaAllReduce"; let description = [{ This pass moves broadcasts from host TF ops into XLA. This enables use of the inter-device network, which is faster than the inter-host network. Broadcasts in XLA are encoded as XlaAllReduce. An all_reduce with all 0
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-binary-elementwise.mlir
func.func @broadcast_shift_right_unsigned(%arg0: tensor<4xui8>, %arg1: tensor<2x4xui8>) -> tensor<2x4xui8> { // CHECK: %[[BROADCAST:.*]] = "mhlo.broadcast_in_dim"(%arg0) <{broadcast_dimensions = dense<1> : tensor<1xi64>}> : (tensor<4xui8>) -> tensor<2x4xui8> // CHECK: mhlo.shift_right_logical %[[BROADCAST]], %arg1 : tensor<2x4xui8> %0 = "tf.RightShift"(%arg0, %arg1) : (tensor<4xui8>, tensor<2x4xui8>) -> tensor<2x4xui8>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 18.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
constexpr StringRef kEntryFuncAttrName = "_entry_function"; // Returns broadcasted user op of an input op. Returns null if // the op is not broadcasted or not the intended type. // Supports both static broadcast and dynamic broadcast. // Note that the patterns below differ from lifted patterns as // ShapeLegalizeToHloPass is run prior to running this pass. // // Dynamically broadcasted bias due to unknown input batch size
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
src/main/java/jcifs/smb1/netbios/NbtAddress.java
* be set. */ public static final int P_NODE = 1; /** * Try Broadcast queries first, then try to resolve the name using the * nameserver. */ public static final int M_NODE = 2; /** * A Hybrid node tries to resolve a name using the nameserver first. If * that fails use the broadcast address. This is the default if a nameserver
Registered: Wed Jun 12 15:45:55 UTC 2024 - Last Modified: Fri Mar 22 20:39:42 UTC 2019 - 30.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// Rewire the output. rewriter.replaceOp(op, {nullptr, rnn_result}); return success(); } }; // Put two TFL BroadcastTo ops in front of the given TF binary broadcast op // to make binary broadcast-able op conversion always successful and does not // require flex delegate. template <typename SourceOp> class ApplyExplicitBroadcasting : public OpRewritePattern<SourceOp> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
return failure(); } lhs = rewriter.create<mhlo::BitcastConvertOp>(op->getLoc(), *lhs_quant_type, adaptor.getLhs()); // rhs (bias) is always 1D that broadcasts to the last dim of lhs. auto broadcast_dims = rewriter.getDenseI64ArrayAttr({lhs_type.getRank() - 1}); auto rhs_type = GetUniformQuantizedType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
return std::make_pair(std::move(broadcasted_input_shape), std::move(broadcasted_weight_shape)); } // Broadcasts batch dimensions of the input and weight of the BatchMatMul // op. In XLA, shapes are all constants, so all operations created in this // function, except BroadcastTo, are expected to be folded. void BroadcastBatchDimensionsForBatchMatMul(OpBuilder &builder, Location loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
platforms/core-runtime/messaging/src/test/groovy/org/gradle/internal/event/DefaultListenerManagerInServiceRegistryTest.groovy
} }) def broadcast1 = listenerManager.getBroadcaster(TestListener) then: 0 * _ when: broadcast1.something("12") then: 1 * created.run() 1 * listener.something("12") 0 * _ when: def broadcast2 = listenerManager.getBroadcaster(DifferentListener)
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon May 27 12:34:44 UTC 2024 - 12.1K bytes - Viewed (0)