- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 64 for Bias (0.09 sec)
-
tensorflow/c/experimental/gradients/nn_grad.cc
Status Compute(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> grad_outputs, absl::Span<AbstractTensorHandle*> grad_inputs) override { /* Given upstream grad U and a BiasAdd: A + bias, the gradients are: * * dA = U * dbias = reduceSum(U, dims = channel_dim) */ AbstractTensorHandle* upstream_grad = grad_outputs[0]; DCHECK(upstream_grad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/resources/composite_ops.cc
.Attr("N: int >= 1") .Attr("T: {numbertype, variant}") .SetIsCommutative() .SetIsAggregate(); REGISTER_OP("MyBiasedDense") .Input("input: T") .Input("weight: T") .Input("bias: T") .Output("out: T") .Attr("T: {float, int8}") .Attr("act: {'', 'relu', 'relu6'} = ''");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 23 21:28:23 UTC 2020 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir
// CHECK-DAG: %[[BIAS:.*]] = "tf.Const"() <{value = dense<[1.000000e-01, 2.000000e-01]> : tensor<2xf32>}> {device = ""} // CHECK: %[[Q_W:.*]] = "quantfork.qcast"(%[[WEIGHT]]) // CHECK: %[[DQ_W:.*]] = "quantfork.dcast"(%[[Q_W]]) // CHECK: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"({{.*}}, %[[DQ_W]], %[[BIAS]]) // CHECK-SAME: f = @composite_conv2d_with_bias_and_relu_fn_1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 8.3K bytes - Viewed (0) -
src/math/cmplx/tan.go
} // Must apply Payne-Hanek range reduction const ( mask = 0x7FF shift = 64 - 11 - 1 bias = 1023 fracMask = 1<<shift - 1 ) // Extract out the integer and exponent such that, // x = ix * 2 ** exp. ix := math.Float64bits(x) exp := int(ix>>shift&mask) - bias - shift ix &= fracMask ix |= 1 << shift // mPi is the binary digits of 1/Pi as a uint64 array,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 01 03:16:37 UTC 2020 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.cc
.has_preset_quantization_method()) { quantization_options_ = mlir::quant::stablehlo::FillPresetQuantizationOptions( quantization_options); } // TODO: b/276999414 - Add activation and bias quantization component as // respective quantization passes are created. QuantizationComponentSpec weight_component; for (const auto& component : quantization_options_.quantization_method()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 08:32:43 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.h
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h" namespace stablehlo { namespace quantization { // Adds passes for quantization of individual quantizable components. // (i.e. activation, weight, bias) void AddQuantizationPasses(mlir::PassManager& pass_manager, const QuantizationOptions& quantization_options); } // namespace quantization } // namespace stablehlo
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 22 12:03:14 UTC 2023 - 1.4K bytes - Viewed (0) -
src/math/bits.go
package math const ( uvnan = 0x7FF8000000000001 uvinf = 0x7FF0000000000000 uvneginf = 0xFFF0000000000000 uvone = 0x3FF0000000000000 mask = 0x7FF shift = 64 - 11 - 1 bias = 1023 signMask = 1 << 63 fracMask = 1<<shift - 1 ) // Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. func Inf(sign int) float64 { var v uint64 if sign >= 0 { v = uvinf
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 05 17:52:29 UTC 2022 - 1.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert_tf_quant_ops_to_mhlo.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.td
def OptimizeIntGraph : Pass<"optimize-int-graph", "mlir::func::FuncOp"> { let summary = "Optimization patterns for quantized integer graph"; let description = [{ This includes patterns for merging addition of zp offset and bias. }]; let constructor = "mlir::quant::stablehlo::CreateOptimizeIntGraphPass()"; let dependentDialects = ["mhlo::MhloDialect"];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 23 01:41:18 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.h
using OpRewritePattern<TFL::SplitVOp>::OpRewritePattern; LogicalResult matchAndRewrite(TFL::SplitVOp splitv_op, PatternRewriter& rewriter) const override; }; // Ensure bias for conv2d op. struct EnsureBiasForConv2d : public OpRewritePattern<TFL::Conv2DOp> { using OpRewritePattern<TFL::Conv2DOp>::OpRewritePattern; LogicalResult matchAndRewrite(TFL::Conv2DOp conv_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 03 16:37:16 UTC 2022 - 4.3K bytes - Viewed (0)