Results 21 - 30 of 255 for weights (0.13 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

                  input.getDefiningOp())) {
            // Tensors with derived scale are biases, and handled in propagation.
            if (tensor_property.use_derived_scale) continue;
            // For weights, use quantization scale inferred from the values.
            if (failed(processConstantOp(op, input.getDefiningOp(), index,
                                         tensor_property, rewriter))) {
              return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
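
    The comment in this helper distinguishes bias tensors (derived scale, handled during propagation) from weights, whose quantization scale is inferred from the constant values themselves. A minimal Go sketch of that inference, assuming a symmetric signed 8-bit target with range ±127; the function name and layout are illustrative, not the MLIR pass's code:

        package main

        import (
        	"fmt"
        	"math"
        )

        // inferWeightScale returns a symmetric per-tensor scale so the largest
        // absolute weight maps to the edge of the signed 8-bit range.
        // Hypothetical sketch; the real pass does this inside MLIR rewrites.
        func inferWeightScale(weights []float32) float64 {
        	maxAbs := 0.0
        	for _, w := range weights {
        		maxAbs = math.Max(maxAbs, math.Abs(float64(w)))
        	}
        	if maxAbs == 0 {
        		return 1.0
        	}
        	return maxAbs / 127.0
        }

        func main() {
        	w := []float32{0.5, -1.2, 0.03, 0.9}
        	scale := inferWeightScale(w)
        	fmt.Printf("scale = %g\n", scale)
        	for _, v := range w {
        		q := int8(math.Round(float64(v) / scale))
        		fmt.Printf("%6.2f -> %4d\n", v, q)
        	}
        }
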
  2. pkg/scheduler/framework/plugins/interpodaffinity/scoring_test.go

    		},
    		// Consider Affinity, Anti Affinity and symmetry together.
    		// for Affinity, the weights are:                8,  0,  0,  0
    		// for Anti Affinity, the weights are:           0, -5,  0,  0
    		// for Affinity symmetry, the weights are:       0,  0,  8,  0
    		// for Anti Affinity symmetry, the weights are:  0,  0,  0, -5
    		{
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Dec 15 03:30:06 UTC 2023
    - 44.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        LOG(INFO) << quantized_tensor->name()->str() << " "
                  << float_tensor->name()->str();
        if (ExpectEqualTensor(quantized_tensor, float_tensor)) {
          if (quantized && quantized_tensor->name()->str().find("weights")) {
            // If tensor is quantized, data type and buffer contents can be
            // different between float and quantized tensors. So do those tests
            // separately in the test body without checking them here.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

        }
        storage_type = mlir::cast<mlir::IntegerType>(raw_elem_type);
      }
    
      // TFlite uses narrow-range [u]int8 for constant buffers of quantized weights.
      // Since we don't know which ones are weights, we represent this optimization
      // as a change in the storage bounds for the type for all constants of this
      // type.
      const int bitwidth = storage_type.getIntOrFloatBitWidth();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
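
    Narrow-range here means the storage minimum is pulled in by one so the signed range is symmetric, e.g. [-127, 127] for int8 rather than the full [-128, 127]. A small sketch of those bounds per bit width, with the helper name narrowRangeBounds assumed for illustration rather than the MLIR type machinery:

        package main

        import "fmt"

        // narrowRangeBounds returns the [min, max] storage bounds for a signed
        // narrow-range integer of the given bit width. Sketch only; the real
        // code adjusts the storage bounds of an MLIR quantized type.
        func narrowRangeBounds(bitwidth int) (int64, int64) {
        	max := int64(1)<<(bitwidth-1) - 1
        	return -max, max
        }

        func main() {
        	for _, bw := range []int{4, 8, 16} {
        		lo, hi := narrowRangeBounds(bw)
        		fmt.Printf("int%-2d narrow range: [%d, %d]\n", bw, lo, hi)
        	}
        }
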
  5. pilot/pkg/networking/core/networkfilter_test.go

    						Port:   &networking.PortSelector{Number: 443},
    						Subset: "prod",
    					},
    					Weight: 75,
    				},
    				{
    					Destination: &networking.Destination{
    						Host:   "example-canary.com",
    						Port:   &networking.PortSelector{Number: 443},
    						Subset: "canary",
    					},
    					Weight: 25,
    				},
    			},
    		},
    	}
    
    	for _, tt := range cases {
    		t.Run(tt.name, func(t *testing.T) {
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed Apr 17 22:20:44 UTC 2024
    - 25.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

      OpSet op_set_;
    
      Option<bool> enable_per_channel_quantization_{
          *this, "enable-per-channel-quantization", llvm::cl::init(false),
          llvm::cl::desc("Whether enable per-channel quantized weights.")};
    };
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with per-tensor scale.
    class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
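
    With enable-per-channel-quantization off, the pass comment says applicable weights get Quantize and Dequantize ops with one per-tensor scale; the flag switches to one scale per output channel. A rough Go sketch of the per-channel scale computation under an assumed flat [channels][elemsPerChannel] layout (illustrative only, not the pass's MLIR rewrite):

        package main

        import (
        	"fmt"
        	"math"
        )

        // perChannelScales computes one symmetric scale per output channel,
        // where the flat weights slice is laid out channel-major.
        // Illustrative sketch; the real pass builds MLIR Quantize/Dequantize ops.
        func perChannelScales(weights []float32, channels int) []float64 {
        	per := len(weights) / channels
        	scales := make([]float64, channels)
        	for c := 0; c < channels; c++ {
        		maxAbs := 0.0
        		for _, w := range weights[c*per : (c+1)*per] {
        			maxAbs = math.Max(maxAbs, math.Abs(float64(w)))
        		}
        		scales[c] = maxAbs / 127.0
        	}
        	return scales
        }

        func main() {
        	w := []float32{0.1, -0.2, 2.0, -1.5} // two channels of two weights each
        	fmt.Println(perChannelScales(w, 2))  // per-tensor would use one scale for all four
        }
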
  7. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      SPARSE = 1,
      DENSE = 2,
    }
    
    table LSHProjectionOptions {
      type: LSHProjectionType;
    }
    
    table SVDFOptions {
      rank:int;
      fused_activation_function:ActivationFunctionType;
      // For weights-only quantization, use asymmetric quantization for non
      // constant inputs at evaluation time.
      asymmetric_quantize_inputs:bool;
    }
    
    // An implementation of TensorFlow RNNCell.
    table RNNOptions {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)
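
    The asymmetric_quantize_inputs comment describes quantizing the non-constant inputs at evaluation time with an asymmetric scheme (scale plus zero point) while the weights stay quantized ahead of time. A small sketch of mapping an observed activation range onto uint8 with a zero point; the function name and the clamp-to-include-zero detail are assumptions made for illustration, not the kernel's actual math:

        package main

        import (
        	"fmt"
        	"math"
        )

        // asymmetricParams maps an observed activation range [minVal, maxVal]
        // onto uint8 [0, 255] with a zero point, so zero stays representable.
        // Hypothetical sketch of the idea behind asymmetric_quantize_inputs.
        func asymmetricParams(minVal, maxVal float64) (scale float64, zeroPoint uint8) {
        	minVal = math.Min(minVal, 0) // keep zero inside the range
        	maxVal = math.Max(maxVal, 0)
        	scale = (maxVal - minVal) / 255.0
        	if scale == 0 {
        		scale = 1
        	}
        	zeroPoint = uint8(math.Round(-minVal / scale))
        	return scale, zeroPoint
        }

        func main() {
        	scale, zp := asymmetricParams(-1.0, 3.0)
        	fmt.Printf("scale=%g zeroPoint=%d\n", scale, zp)
        	x := 0.5
        	q := uint8(math.Round(x/scale)) + zp
        	fmt.Printf("%g quantizes to %d\n", x, q)
        }
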
  8. pkg/test/loadbalancersim/lb_test.go

    	wg := sync.WaitGroup{}
    
    	clientLatencies := make([]timeseries.Data, len(s.mesh.Clients()))
    	for i, client := range s.mesh.Clients() {
    		i := i
    		client := client
    		wg.Add(1)
    		go func() {
    			// Assign weights to the endpoints.
    			var conns []*loadbalancer.WeightedConnection
    			for _, n := range s.mesh.Nodes() {
    				conns = append(conns, s.newWeightedConnection(client, n))
    			}
    
    			// Create a load balancer
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Thu May 19 23:29:30 UTC 2022
    - 11K bytes
    - Viewed (0)
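
    The goroutine above wraps every mesh node in a weighted connection before building the load balancer. A compact sketch of weight-proportional selection over such endpoints, with a made-up weightedEndpoint type standing in for loadbalancer.WeightedConnection:

        package main

        import (
        	"fmt"
        	"math/rand"
        )

        // weightedEndpoint is a hypothetical stand-in for a weighted connection.
        type weightedEndpoint struct {
        	name   string
        	weight int
        }

        // pick selects an endpoint with probability proportional to its weight.
        func pick(eps []weightedEndpoint, r *rand.Rand) weightedEndpoint {
        	total := 0
        	for _, e := range eps {
        		total += e.weight
        	}
        	n := r.Intn(total)
        	for _, e := range eps {
        		if n < e.weight {
        			return e
        		}
        		n -= e.weight
        	}
        	return eps[len(eps)-1] // unreachable when total > 0
        }

        func main() {
        	eps := []weightedEndpoint{{"node-a", 3}, {"node-b", 1}}
        	r := rand.New(rand.NewSource(1))
        	counts := map[string]int{}
        	for i := 0; i < 1000; i++ {
        		counts[pick(eps, r).name]++
        	}
        	fmt.Println(counts) // roughly 3:1 in favour of node-a
        }
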
  9. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

              &q_builder, input_model, quantized_type, use_updated_hybrid_scheme,
              ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) {
        return absl::InvalidArgumentError(
            "Quantize weights transformation failed.");
      }
      const uint8_t* q_buffer = q_builder.GetBufferPointer();
      *result =
          std::string(reinterpret_cast<const char*>(q_buffer), q_builder.GetSize());
    
      return absl::OkStatus();
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
  10. pkg/scheduler/framework/plugins/interpodaffinity/scoring.go

    }
    
    func (m scoreMap) processTerm(term *framework.AffinityTerm, weight int32, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, multiplier int32) {
    	if term.Matches(pod, nsLabels) {
    		if tpValue, tpValueExist := node.Labels[term.TopologyKey]; tpValueExist {
    			if m[term.TopologyKey] == nil {
    				m[term.TopologyKey] = make(map[string]int64)
    			}
    			m[term.TopologyKey][tpValue] += int64(weight * multiplier)
    		}
    	}
    }
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Dec 15 03:30:06 UTC 2023
    - 10.5K bytes
    - Viewed (0)
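
    processTerm folds weight * multiplier into a per-topology-value bucket whenever the affinity term matches; affinity terms contribute with a positive multiplier and anti-affinity terms with a negative one, which is where the 8 and -5 contributions in the scoring_test.go comments above come from. A stripped-down sketch of that accumulation, with simplified arguments standing in for the framework's AffinityTerm, pod, and label types:

        package main

        import "fmt"

        // scoreMap mirrors the scheduler's topologyKey -> topologyValue -> score shape.
        type scoreMap map[string]map[string]int64

        // processTerm adds weight*multiplier for the node's topology value when
        // the term matches; matching and labels are simplified stand-ins here.
        func (m scoreMap) processTerm(topologyKey, topologyValue string, weight, multiplier int32, matches bool) {
        	if !matches {
        		return
        	}
        	if m[topologyKey] == nil {
        		m[topologyKey] = make(map[string]int64)
        	}
        	m[topologyKey][topologyValue] += int64(weight * multiplier)
        }

        func main() {
        	m := scoreMap{}
        	// An affinity term with weight 8 (multiplier 1) and an anti-affinity
        	// term with weight 5 (multiplier -1), giving the 8 and -5 contributions
        	// listed in the scoring_test.go comment above.
        	m.processTerm("topology.kubernetes.io/zone", "zone-a", 8, 1, true)
        	m.processTerm("topology.kubernetes.io/zone", "zone-a", 5, -1, true)
        	fmt.Println(m) // map[topology.kubernetes.io/zone:map[zone-a:3]]
        }
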