Results 11 - 20 of 371 for weights (0.47 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

    node {
      name: "BoxPredictor_4/ClassPredictor/weights/read"
      op: "Identity"
      input: "BoxPredictor_4/ClassPredictor/weights"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "_class"
        value {
          list {
            s: "loc:@BoxPredictor_4/ClassPredictor/weights"
          }
        }
      }
    }
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

      const bool disable_per_channel_;
    
      // We should distinguish weights and bias constants. Biases are specified by
      // the quantization spec or are the operands of ops with the same scale spec.
      // The rest are weights.
      DenseSet<Operation*> weights_;
    
      // The weights require narrow_range quantization. This map collects all the
      // weight operands defined by the op quant spec. The value of each entry is
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
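
    The header comment above encodes a simple partitioning rule: constants
    named as biases by the op's quantization spec are biases, and every other
    constant operand is a weight. A minimal plain-C++ sketch of that
    bookkeeping (illustrative only; `Operand` and `Partition` are hypothetical
    stand-ins for the driver's MLIR Operation* and llvm::DenseSet types):

        #include <unordered_set>
        #include <vector>

        struct Operand { int id; bool is_constant; };

        // Split an op's constant operands into weights and biases, where
        // `bias_indices` comes from the op's quantization spec. Everything
        // not named a bias is treated as a weight ("the rest are weights").
        void Partition(const std::vector<Operand>& operands,
                       const std::unordered_set<int>& bias_indices,
                       std::unordered_set<int>& weights,
                       std::unordered_set<int>& biases) {
          for (int i = 0; i < static_cast<int>(operands.size()); ++i) {
            if (!operands[i].is_constant) continue;  // only constants are tracked
            if (bias_indices.count(i)) biases.insert(operands[i].id);
            else weights.insert(operands[i].id);
          }
        }
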
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc

    namespace {
    
    using ::stablehlo::quantization::Method;
    using ::stablehlo::quantization::QuantizedType;
    using ::stablehlo::quantization::WeightOnlyPtq;
    
    // Inserts quantization parameters of weights for weight-only quantization and
    // dynamic range quantization of `stablehlo.convolution` and
    // `stablehlo.dot_general`.
    class InsertWeightParamPass
        : public impl::InsertWeightParamPassBase<InsertWeightParamPass> {
     public:
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 10.2K bytes
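
    The pass described above targets weight-only and dynamic-range
    quantization, where only the weights are stored as integers and the
    activations stay in float. A minimal plain-C++ sketch of the idea
    (illustrative only; `QuantizeWeights` and `Dot` are hypothetical helpers,
    not the StableHLO pass API):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        struct QuantizedWeights {
          std::vector<int8_t> values;
          double scale;  // symmetric int8, zero point fixed at 0
        };

        QuantizedWeights QuantizeWeights(const std::vector<float>& w) {
          double max_abs = 0.0;
          for (float v : w) max_abs = std::max<double>(max_abs, std::fabs(v));
          QuantizedWeights q{std::vector<int8_t>(w.size()),
                             max_abs > 0 ? max_abs / 127.0 : 1.0};
          for (size_t i = 0; i < w.size(); ++i)
            q.values[i] = static_cast<int8_t>(std::lround(w[i] / q.scale));
          return q;
        }

        // Float dot product against quantized weights: each weight is
        // dequantized on the fly while the activations remain float.
        double Dot(const std::vector<float>& x, const QuantizedWeights& q) {
          double acc = 0.0;
          for (size_t i = 0; i < x.size(); ++i)
            acc += x[i] * (q.values[i] * q.scale);
          return acc;
        }
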
  4. tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc

          "composite_conv.*");
    
      // Enable per-channel quantization for convolution weights.
      QuantizedType conv_weight_quantized_type{};
    
      // Assumes NHWC format, specifying the channel dimension (3) as the
      // quantized axis.
      conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3);
    
      // The index of weight operands passed to lifted functions for convolution
      // is 1.
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 8.3K bytes
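
    The config above picks dimension 3 (the channel axis in NHWC) as the
    quantized axis for convolution weights. A minimal plain-C++ sketch of what
    per-channel quantization along that axis means (illustrative only;
    `PerChannelScales` is a hypothetical helper, not the config.cc API):

        #include <algorithm>
        #include <cmath>
        #include <cstddef>
        #include <vector>

        // One symmetric int8 scale per channel of a 4-D weight tensor stored
        // in row-major order, quantizing along dimension 3 (the innermost
        // axis, so a flat index i maps to channel i % dims[3]).
        std::vector<double> PerChannelScales(const std::vector<float>& data,
                                             const int dims[4]) {
          const int channels = dims[3];
          std::vector<double> max_abs(channels, 0.0);
          for (std::size_t i = 0; i < data.size(); ++i) {
            int c = static_cast<int>(i) % channels;
            max_abs[c] = std::max<double>(max_abs[c], std::fabs(data[i]));
          }
          std::vector<double> scales(channels);
          for (int c = 0; c < channels; ++c)
            scales[c] = max_abs[c] > 0 ? max_abs[c] / 127.0 : 1.0;
          return scales;
        }
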
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

    // This is the argument used to refer to the pass in
    // the textual format (on the command line, for example).
        return "quant-quantize-weights";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Quantize weights used by quantizable ops.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

  // For now, restrict scale adjustment to ops with affine quantized weights
  // whose weights and biases are constants. This currently applies only to
  // FC and Conv* ops. The weight restriction can be relaxed if there is a
  // need to adjust the scale of variable weights.
      auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
  7. pilot/pkg/config/kube/gateway/conversion.go

	if forwardTo == nil {
		return nil, nil, nil
	}

	// Collect backend weights, defaulting an unset weight to 1.
	weights := []int{}
	action := []k8s.BackendRef{}
	for _, w := range forwardTo {
		wt := int(ptr.OrDefault(w.Weight, 1))
		if wt == 0 {
			// A backend explicitly weighted to zero receives no traffic; skip it.
			continue
		}
		action = append(action, w)
		weights = append(weights, wt)
	}
	// With a single backend left there is nothing to split across, so drop
	// the explicit weight.
	if len(weights) == 1 {
		weights = []int{0}
	}
    
    	var invalidBackendErr *ConfigError
    - Last Modified: Fri Jun 14 04:34:37 UTC 2024
    - 84.7K bytes
  8. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h

    // storage type. The available values use the full range of the storage
    // type, i.e. [-128, 127]. Assumes asymmetric quantization, meaning the
    // zero point can be non-zero.
    // If `narrow_range` is set to true (e.g., for weights), the restricted
    // range [-127, 127] is used for symmetric mapping.
    UniformQuantizedType CreateI8F32UniformQuantizedType(Location loc,
                                                         MLIRContext& context,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.7K bytes
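
    The comment above contrasts the two int8 mappings: asymmetric over the
    full range [-128, 127] with a movable zero point, and symmetric over the
    narrow range [-127, 127] with the zero point pinned at 0, as used for
    weights. A minimal plain-C++ sketch of both (illustrative only, not the
    CreateI8F32UniformQuantizedType helper itself):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        struct QuantParams {
          double scale;
          int32_t zero_point;
        };

        // Asymmetric, full range: real values in [min, max] map onto
        // [-128, 127], so the zero point is generally non-zero.
        QuantParams AsymmetricI8(double min, double max) {
          min = std::min(min, 0.0);  // the quantized range must contain zero
          max = std::max(max, 0.0);
          double scale = (max - min) / 255.0;  // 127 - (-128)
          if (scale == 0) return {1.0, 0};
          int32_t zp = static_cast<int32_t>(std::lround(-128 - min / scale));
          return {scale, zp};
        }

        // Symmetric, narrow range: [-127, 127] with the zero point at 0,
        // the restricted mapping the comment describes for weights.
        QuantParams SymmetricNarrowI8(double min, double max) {
          double amax = std::max(std::fabs(min), std::fabs(max));
          return {amax > 0 ? amax / 127.0 : 1.0, 0};
        }
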
  9. pilot/pkg/networking/core/loadbalancer/loadbalancer_test.go

    				weights := make([]int, 0)
    				for _, localityEndpoint := range cluster.LoadAssignment.Endpoints {
    					weights = append(weights, int(localityEndpoint.LoadBalancingWeight.GetValue()))
    				}
    				if !reflect.DeepEqual(weights, tt.expected) {
    					t.Errorf("Got weights %v expected %v", weights, tt.expected)
    				}
    			})
    		}
    	})
    
    - Last Modified: Tue Apr 23 05:38:57 UTC 2024
    - 39.1K bytes
  10. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    // point constant.
    def : Pat<(TFL_DequantizeOp
                 (TFL_QuantizeOp (Arith_ConstantOp F32ElementsAttr:$cst), $qt)),
              (TFL_ConstOp $cst)>;
    
    // Transpose conv supports hybrid computation with quantized weights.
    def FoldQuantWeightsIntoTposeConv : Pat<
      (TFL_TransposeConvOp
        $output_shape,
        (TFL_DequantizeOp $quant_weights),
        $quant_input,
        $bias, $padding, $stride_h, $stride_w, $faf),
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes