Results 1 - 9 of 9 for scale_fn (0.16 sec)

  1. tensorflow/compiler/mlir/lite/quantization/device_target.h

    struct KernelSpec {
      // Scale constraint
      ScaleConstraintType type;
    
      // Custom function to derive the scales. Only available when the scale
      // constraint is `CustomScale`.
      ScaleFn scale_fn;
    };
    
    class KernelSpecs {
     public:
      using Signature = llvm::SmallVector<quant::AnyQuantizedType, 4>;
    
      // Returns the kernel specification for the kernel signature.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.1K bytes
    - Viewed (0)
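
    As an orientation aid, here is a minimal, self-contained C++ analog of the
    pattern in this header: a spec that tags a scale constraint and, for
    CustomScale, carries a user-supplied callback. The enum values and field
    names mirror the excerpt; the callback signature and the doubling rule are
    simplified assumptions, not the actual TensorFlow types.

        #include <functional>
        #include <iostream>

        // Simplified stand-ins for the types in device_target.h; the real
        // ScaleFn operates on MLIR ops and quantization state (assumption).
        enum class ScaleConstraintType {
          OutputInputFreeScale,
          OutputInputSameScale,
          CustomScale
        };
        using ScaleFn = std::function<bool(double input_scale, double* output_scale)>;

        struct KernelSpec {
          ScaleConstraintType type;
          ScaleFn scale_fn;  // only consulted when type == CustomScale
        };

        int main() {
          // A spec whose output scale is derived by a custom callback
          // (hypothetical rule: output scale = 2 * input scale).
          KernelSpec spec{ScaleConstraintType::CustomScale,
                          [](double in, double* out) { *out = 2 * in; return true; }};
          double out_scale = 0;
          if (spec.type == ScaleConstraintType::CustomScale) {
            spec.scale_fn(0.5, &out_scale);
          }
          std::cout << out_scale << "\n";  // prints 1
        }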
  2. tensorflow/compiler/mlir/lite/quantization/quantization_context.cc

      switch (spec->type) {
        case ScaleConstraintType::OutputInputFreeScale: {
          // no propagation.
          *changed |= false;
          break;
        }
        case ScaleConstraintType::CustomScale: {
          if (failed(spec->scale_fn(this, op, new_items, changed))) {
            return failure();
          }
          break;
        }
        case ScaleConstraintType::OutputInputSameScale: {
          auto params = GetQuantParamsForSameScaleConstraint(op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 01:38:03 UTC 2024
    - 13.1K bytes
    - Viewed (0)
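
    The switch above is the propagation step: a free scale propagates nothing,
    a custom constraint delegates to scale_fn and forwards its failure, and the
    same-scale case derives shared parameters. A compact sketch of that
    dispatch, using simplified stand-ins rather than the real MLIR types (the
    callback shape and the bool return are assumptions):

        #include <functional>

        // Simplified stand-ins, as in the sketch under result 1.
        enum class ScaleConstraintType { OutputInputFreeScale, OutputInputSameScale, CustomScale };
        struct KernelSpec {
          ScaleConstraintType type;
          std::function<bool(bool* changed)> scale_fn;  // false signals failure
        };

        // Mirrors the excerpt's structure: fails only if a custom scale_fn
        // fails; otherwise records whether anything changed.
        bool PropagateScales(const KernelSpec& spec, bool* changed) {
          switch (spec.type) {
            case ScaleConstraintType::OutputInputFreeScale:
              return true;  // no propagation between inputs and outputs
            case ScaleConstraintType::CustomScale:
              return spec.scale_fn(changed);
            case ScaleConstraintType::OutputInputSameScale:
              *changed = true;  // the real code computes shared quant params here
              return true;
          }
          return true;
        }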
  3. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc

      Value scale_op = rewriter.create<TF::ConstOp>(
          loc, scale_type,
          DenseFPElementsAttr::get(scale_type,
                                   {static_cast<float>(qtype.getScale())}));
    
      if (original_input_tensor_type.getElementType().isBF16()) {
        // Add a bf16 cast op after the scale to match the next op's data
        // type.
        scale_op = rewriter.create<TF::CastOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

          return quantizeF32ToInt8(expressed_value);
        }
    
        bool lossy;
        expressed_value.convert(scale_.getSemantics(), round_mode_, &lossy);
        // fixed_point = clamp(clamp_min, clamp_max, (
        //   roundHalfToEven(expressed / scale) + zero_point))
        APFloat scaled = (expressed_value / scale_);
        scaled.roundToIntegral(round_mode_);
        scaled.add(zero_point_, round_mode_);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
    - Viewed (0)
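
    The comment above spells out the fixed-point formula. A plain-C++
    restatement with a worked example; the scale, zero point, and int8 clamp
    range below are illustrative values, not taken from the file:

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <iostream>

        // fixed_point = clamp(clamp_min, clamp_max,
        //                     roundHalfToEven(expressed / scale) + zero_point)
        int8_t QuantizeToInt8(float expressed, float scale, int32_t zero_point) {
          // std::nearbyint rounds half to even under the default rounding mode.
          int32_t q = static_cast<int32_t>(std::nearbyint(expressed / scale)) + zero_point;
          return static_cast<int8_t>(std::clamp(q, -128, 127));
        }

        int main() {
          // 3.2 / 0.5 = 6.4 -> rounds to 6; 6 + 10 = 16, inside [-128, 127].
          std::cout << static_cast<int>(QuantizeToInt8(3.2f, 0.5f, 10)) << "\n";  // 16
        }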
  5. tensorflow/compiler/mlir/lite/quantization/device_target.cc

      } else {  // float
        signature->push_back(AnyQuantizedType());
      }
    }
    
    LogicalResult DeviceTarget::RegisterKernel(
        llvm::StringRef kernel, const KernelSpecs::Signature& signature,
        const ScaleFn& fn, const ScaleDecomposeFn& dfn) {
      return specs_[kernel].Add(signature, {ScaleConstraintType::CustomScale, fn});
    }
    
    namespace ph = std::placeholders;
    
    LogicalResult DeviceTarget::RegisterKernel(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.3K bytes
    - Viewed (0)
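
    RegisterKernel above just stores a {CustomScale, fn} spec in a per-kernel
    table; the real KernelSpecs keys entries on a signature of quantized types.
    A simplified stand-alone registry illustrating that shape (the string key
    and the container choice are assumptions):

        #include <functional>
        #include <map>
        #include <string>
        #include <vector>

        // Simplified stand-ins; not the real TensorFlow/MLIR types.
        enum class ScaleConstraintType { OutputInputFreeScale, OutputInputSameScale, CustomScale };
        using ScaleFn = std::function<bool()>;
        struct KernelSpec { ScaleConstraintType type; ScaleFn scale_fn; };

        class DeviceTargetSketch {
         public:
          // Mirrors specs_[kernel].Add(signature, {ScaleConstraintType::CustomScale, fn})
          // from the excerpt, with the signature dropped for brevity.
          void RegisterKernel(const std::string& kernel, const ScaleFn& fn) {
            specs_[kernel].push_back({ScaleConstraintType::CustomScale, fn});
          }

         private:
          std::map<std::string, std::vector<KernelSpec>> specs_;
        };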
  6. src/cmd/vendor/github.com/google/pprof/profile/profile.go

    		return
    	}
    	ratios := make([]float64, len(p.SampleType))
    	for i := range p.SampleType {
    		ratios[i] = ratio
    	}
    	p.ScaleN(ratios)
    }
    
    // ScaleN multiplies each value in each sample by a different amount
    // and keeps only samples that have at least one non-zero value.
    func (p *Profile) ScaleN(ratios []float64) error {
    	if len(p.SampleType) != len(ratios) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 31 19:48:28 UTC 2024
    - 22.3K bytes
    - Viewed (0)
  7. src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go

    			if sampleType[i] == nil {
    				ratios[i] = 1
    				continue
    			}
    			ratios[i], _ = Scale(1, st.Unit, sampleType[i].Unit)
    			p.SampleType[i].Unit = sampleType[i].Unit
    		}
    		if err := p.ScaleN(ratios); err != nil {
    			return fmt.Errorf("scale: %v", err)
    		}
    	}
    	return nil
    }
    
    // CommonValueType returns the finest type from a set of compatible
    // types.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 31 19:48:28 UTC 2024
    - 8.8K bytes
    - Viewed (0)
  8. src/cmd/vendor/github.com/google/pprof/profile/merge.go

    	normScale := make([]float64, len(baseVals))
    	for i := range baseVals {
    		if srcVals[i] == 0 {
    			normScale[i] = 0.0
    		} else {
    			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
    		}
    	}
    	p.ScaleN(normScale)
    	return nil
    }
    
    func isZeroSample(s *Sample) bool {
    	for _, v := range s.Value {
    		if v != 0 {
    			return false
    		}
    	}
    	return true
    }
    
    type profileMerger struct {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Feb 16 15:19:53 UTC 2024
    - 17K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/schema/schema_generated.h

      auto max__ = max ? _fbb.CreateVector<float>(*max) : 0;
      auto scale__ = scale ? _fbb.CreateVector<float>(*scale) : 0;
      auto zero_point__ = zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0;
      return tflite::CreateQuantizationParameters(
          _fbb,
          min__,
          max__,
          scale__,
          zero_point__,
          details_type,
          details,
          quantized_dimension);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 1M bytes
    - Viewed (0)