- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 285 for "weights" (0.18 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
"composite_conv.*"); // Enable per-channel quantization for convolution weights. QuantizedType conv_weight_quantized_type{}; // Assumes NHWC format, specifying the channel dimension (3) as the // quantized axis. conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3); // The index of weight operands passed to lifted functions for convolution // is 1.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// For now, restrict scale adjustment to ops with affine quantized weights, // and having weights and biases as constants. This currently only applies to // FC and Conv* ops. Restriction for the weight can be relaxed if there are // needs for adjusting scale of variable weights. auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
pilot/pkg/config/kube/gateway/conversion.go
if forwardTo == nil { return nil, nil, nil } weights := []int{} action := []k8s.BackendRef{} for _, w := range forwardTo { wt := int(ptr.OrDefault(w.Weight, 1)) if wt == 0 { continue } action = append(action, w) weights = append(weights, wt) } if len(weights) == 1 { weights = []int{0} } var invalidBackendErr *ConfigError
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri Jun 14 04:34:37 UTC 2024 - 84.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// storage type. The available values use the full range of the storage value, // i.e. [-128, 127]. Assumes asymmetric quantization, meaning the zero point // value can be a non-zero value. // If `narrow_range` is set true (ex: for weights), a restricted range of // integers will be used for symmetric mapping, i.e. [-127, 127]. UniformQuantizedType CreateI8F32UniformQuantizedType(Location loc, MLIRContext& context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
pilot/pkg/networking/core/loadbalancer/loadbalancer_test.go
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue Apr 23 05:38:57 UTC 2024 - 39.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
// point constant. def : Pat<(TFL_DequantizeOp (TFL_QuantizeOp (Arith_ConstantOp F32ElementsAttr:$cst), $qt)), (TFL_ConstOp $cst)>; // Transpose conv supports hybrid computation with quantized weights. def FoldQuantWeightsIntoTposeConv : Pat< (TFL_TransposeConvOp $output_shape, (TFL_DequantizeOp $quant_weights), $quant_input, $bias, $padding, $stride_h, $stride_w, $faf),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
(binaryOp (TFL_TransposeConvOp:$output $output_shape, $weights, $input, (Arith_ConstantOp FloatElementsAttr:$bias), $padding, $stride_h, $stride_w, TFL_AF_None), (Arith_ConstantOp FloatElementsAttr:$value), $act_fn), (TFL_TransposeConvOp $output_shape, $weights, $input, (binaryOp (Arith_ConstantOp $bias), (Arith_ConstantOp $value), TFL_AF_None),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
pilot/pkg/xds/endpoints/ep_filters.go
return scaleFactor } weight := uint32(math.MaxUint32) if ep.GetLoadBalancingWeight().Value < math.MaxUint32/scaleFactor { weight = ep.GetLoadBalancingWeight().Value * scaleFactor } return weight } // Apply the weight for this endpoint to the network gateways.
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Wed May 29 01:17:58 UTC 2024 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_output_weights, // Forward Cell weights TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_input_weights, // Optional Forward cell weights TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_forget_weights, // Optional Forward cell weights TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_output_weights, // Forward Bias
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
input_ = fused_func_op_.getArgument(0); bias_ = fused_func_op_.getArgument(2); weight_ = fused_func_op_.getArgument(1); weight_type_ = mlir::cast<RankedTensorType>(weight_.getType()); if (weight_type_.getRank() != 2) { return fused_func_op_.emitError() << "The weight tensor was not of rank 2"; } if (weight_type_.getDimSize(1) % num_gates_ != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0)