Results 11 - 20 of 140 for weights (0.2 sec)

  1. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

      input_ = fused_func_op_.getArgument(0);
      bias_ = fused_func_op_.getArgument(2);
    
      weight_ = fused_func_op_.getArgument(1);
      weight_type_ = mlir::cast<RankedTensorType>(weight_.getType());
    
      if (weight_type_.getRank() != 2) {
        return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
      }
    
      if (weight_type_.getDimSize(1) % num_gates_ != 0) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
    - Viewed (0)
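    The snippet pulls the fused LSTM's input, weight, and bias off the function
    arguments, then requires the weight to be rank 2 with a second dimension
    that divides evenly across the gates. A minimal sketch of that shape check,
    with illustrative names (kNumGates standing in for num_gates_):

      #include <array>
      #include <cstdint>
      #include <iostream>

      // A fused LSTM weight must be rank 2, and its second dimension must
      // split evenly across the gates (input, forget, cell, output).
      constexpr int kNumGates = 4;

      bool ValidateLstmWeightShape(const std::array<int64_t, 2>& shape) {
        // Rank 2 is fixed by the array type; check gate divisibility.
        return shape[1] % kNumGates == 0;
      }

      int main() {
        std::cout << ValidateLstmWeightShape({128, 512})   // 512 / 4 gates: ok
                  << ValidateLstmWeightShape({128, 510})   // not divisible
                  << "\n";                                 // prints: 10
      }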
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc

                             METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8,
                         "weight_only", "Post-training weight-only quantizaiton"))};
    
      Option<bool> enable_per_channel_quantization_{
          *this, "enable-per-channel-quantization", llvm::cl::init(false),
          llvm::cl::desc("Whether enable per-channel quantized weights.")};
    };
    
    // Apply constant transformations for the op_set.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.4K bytes
    - Viewed (0)
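    The option gates per-channel quantization, where each output channel of a
    weight gets its own scale instead of one scale shared by the whole tensor.
    A sketch of the difference, assuming symmetric int8 scaling (PerChannelScales
    is an illustrative helper, not the pass's API):

      #include <algorithm>
      #include <cmath>
      #include <iostream>
      #include <vector>

      // Per-channel: one symmetric int8 scale per output channel.
      // Per-tensor: a single scale computed over all values.
      std::vector<float> PerChannelScales(
          const std::vector<std::vector<float>>& weight) {
        std::vector<float> scales;
        for (const auto& channel : weight) {
          float max_abs = 0.f;
          for (float v : channel) max_abs = std::max(max_abs, std::fabs(v));
          scales.push_back(max_abs / 127.f);
        }
        return scales;
      }

      int main() {
        auto s = PerChannelScales({{0.02f, -0.05f}, {1.5f, -2.0f}});
        // The small channel keeps precision that a single per-tensor
        // scale (~0.0157) would throw away.
        std::cout << s[0] << " " << s[1] << "\n";  // ~0.000394 ~0.0157
      }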
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

                  input.getDefiningOp())) {
            // Tensors with derived scale are biases, and handled in propagation.
            if (tensor_property.use_derived_scale) continue;
            // For weights, use quantization scale inferred from the values.
            if (failed(processConstantOp(op, input.getDefiningOp(), index,
                                         tensor_property, rewriter))) {
              return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
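    For weight constants, the pass derives the quantization scale from the
    stored values themselves rather than from observed activations. A minimal
    sketch of symmetric int8 quantization with an inferred scale (names are
    illustrative):

      #include <algorithm>
      #include <cmath>
      #include <cstdint>
      #include <iostream>
      #include <vector>

      // Infer a symmetric int8 scale from the constant's values, then
      // round each value to the nearest quantized step.
      std::vector<int8_t> QuantizeFromValues(const std::vector<float>& w,
                                             float& scale) {
        float max_abs = 0.f;
        for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
        scale = max_abs > 0.f ? max_abs / 127.f : 1.f;
        std::vector<int8_t> q;
        for (float v : w)
          q.push_back(static_cast<int8_t>(std::lround(v / scale)));
        return q;
      }

      int main() {
        float scale = 0.f;
        auto q = QuantizeFromValues({0.5f, -1.0f, 0.25f}, scale);
        std::cout << scale << ": " << int(q[0]) << " " << int(q[1]) << " "
                  << int(q[2]) << "\n";  // ~0.00787: 64 -127 32
      }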
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        LOG(INFO) << quantized_tensor->name()->str() << " "
                  << float_tensor->name()->str();
        if (ExpectEqualTensor(quantized_tensor, float_tensor)) {
          if (quantized && quantized_tensor->name()->str().find("weights")) {
            // If tensor is quantized, data type and buffer contents can be
            // different between float and quantized tensors. So do those tests
            // separately in the test body without checking them here.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
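    Note that std::string::find returns a position, not a bool: a miss yields
    std::string::npos (truthy in a bare boolean test), while a match at the
    start of the name yields 0 (falsy), so the condition needs the explicit
    npos comparison shown in the corrected snippet above:

      #include <iostream>
      #include <string>

      int main() {
        std::string name = "weights/kernel";
        // find() returns index 0 here, which a bare boolean test would read
        // as false; a miss returns npos, which it would read as true.
        bool has_weights = name.find("weights") != std::string::npos;
        std::cout << std::boolalpha << has_weights << "\n";  // true
      }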
  5. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

        }
        storage_type = mlir::cast<mlir::IntegerType>(raw_elem_type);
      }
    
      // TFLite uses narrow-range [u]int8 for constant buffers of quantized weights.
      // Since we don't know which ones are weights, we represent this optimization
      // as a change in the storage bounds for the type for all constants of this
      // type.
      const int bitwidth = storage_type.getIntOrFloatBitWidth();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
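    "Narrow range" drops the most negative value of the signed storage type so
    the representable range is symmetric around zero: int8 becomes [-127, 127]
    rather than [-128, 127], which lets one scale map +max and -max
    identically. A sketch of computing those storage bounds from the bitwidth:

      #include <cstdint>
      #include <iostream>

      // Narrow-range bounds for a signed storage type of the given width:
      // drop the most negative value so the range is symmetric.
      void NarrowRangeBounds(int bitwidth, int64_t& qmin, int64_t& qmax) {
        qmax = (int64_t{1} << (bitwidth - 1)) - 1;
        qmin = -qmax;  // skip -(qmax + 1)
      }

      int main() {
        int64_t lo = 0, hi = 0;
        NarrowRangeBounds(8, lo, hi);
        std::cout << lo << " " << hi << "\n";  // -127 127
      }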
  6. pilot/pkg/networking/core/networkfilter_test.go

    						Port:   &networking.PortSelector{Number: 443},
    						Subset: "prod",
    					},
    					Weight: 75,
    				},
    				{
    					Destination: &networking.Destination{
    						Host:   "example-canary.com",
    						Port:   &networking.PortSelector{Number: 443},
    						Subset: "canary",
    					},
    					Weight: 25,
    				},
    			},
    		},
    	}
    
    	for _, tt := range cases {
    		t.Run(tt.name, func(t *testing.T) {
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed Apr 17 22:20:44 UTC 2024
    - 25.8K bytes
    - Viewed (0)
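    The fixture splits traffic 75/25 between the prod and canary subsets. The
    weights behave like a discrete distribution over destinations; a quick
    sketch of the same split (the actual routing lives in Envoy, this only
    illustrates the weighting):

      #include <iostream>
      #include <random>

      int main() {
        // Two destinations weighted 75 (prod) and 25 (canary), as in the
        // test case above.
        std::mt19937 rng(42);
        std::discrete_distribution<int> pick({75.0, 25.0});
        int counts[2] = {0, 0};
        for (int i = 0; i < 10000; ++i) ++counts[pick(rng)];
        std::cout << counts[0] << " prod / " << counts[1]
                  << " canary\n";  // roughly 7500 / 2500
      }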
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

      OpSet op_set_;
    
      Option<bool> enable_per_channel_quantization_{
          *this, "enable-per-channel-quantization", llvm::cl::init(false),
          llvm::cl::desc("Whether enable per-channel quantized weights.")};
    };
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with per-tensor scale.
    class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
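    Dynamic-range quantization wraps each eligible weight in a
    Quantize/Dequantize pair with a single per-tensor scale; numerically, that
    is a round trip through int8. A sketch of the effect on the stored values
    (not the rewrite pattern itself):

      #include <algorithm>
      #include <cmath>
      #include <cstdint>
      #include <iostream>
      #include <vector>

      int main() {
        std::vector<float> w = {0.5f, -1.25f, 2.0f, -0.01f};
        float max_abs = 0.f;
        for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
        const float scale = max_abs / 127.f;  // single per-tensor scale
        for (float v : w) {
          int8_t q = static_cast<int8_t>(std::lround(v / scale));
          // Each value comes back with a small rounding error.
          std::cout << v << " -> " << q * scale << "\n";
        }
      }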
  8. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      SPARSE = 1,
      DENSE = 2,
    }
    
    table LSHProjectionOptions {
      type: LSHProjectionType;
    }
    
    table SVDFOptions {
      rank:int;
      fused_activation_function:ActivationFunctionType;
      // For weights-only quantization, use asymmetric quantization for
      // non-constant inputs at evaluation time.
      asymmetric_quantize_inputs:bool;
    }
    
    // An implementation of TensorFlow RNNCell.
    table RNNOptions {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)
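    asymmetric_quantize_inputs applies to the non-constant inputs: unlike the
    symmetric scheme used for weights, an asymmetric scheme adds a zero point
    so the int8 range can cover a lopsided activation interval. A sketch under
    an assumed observed range of [-0.5, 3.5]:

      #include <algorithm>
      #include <cmath>
      #include <iostream>

      int main() {
        const float min = -0.5f, max = 3.5f;      // assumed input range
        const float scale = (max - min) / 255.f;  // full int8 span
        const int zero_point = std::lround(-128.f - min / scale);  // -96
        const float x = 1.0f;
        const int q = std::clamp(
            static_cast<int>(std::lround(x / scale)) + zero_point, -128, 127);
        std::cout << "q=" << q                                       // -32
                  << " dequant=" << (q - zero_point) * scale << "\n";  // ~1.0
      }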
  9. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

              &q_builder, input_model, quantized_type, use_updated_hybrid_scheme,
              ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) {
        return absl::InvalidArgumentError(
            "Quantize weights transformation failed.");
      }
      const uint8_t* q_buffer = q_builder.GetBufferPointer();
      *result =
          std::string(reinterpret_cast<const char*>(q_buffer), q_builder.GetSize());
    
      return absl::OkStatus();
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
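    The snippet copies the quantizer builder's raw output into a std::string
    via reinterpret_cast; std::string here is just an owning byte container,
    and the (pointer, size) constructor copies the bytes verbatim, embedded
    NULs included. A standalone illustration with stand-in bytes:

      #include <cstdint>
      #include <iostream>
      #include <string>
      #include <vector>

      int main() {
        std::vector<uint8_t> buffer = {0x08, 0x00, 0xFF, 0x42};  // stand-in
        std::string result(reinterpret_cast<const char*>(buffer.data()),
                           buffer.size());
        std::cout << result.size() << " bytes copied\n";  // 4, NUL included
      }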
  10. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h

    inline constexpr std::array<int64_t, 4> kNchwToNhwcPermutation = {0, 2, 3, 1};
    
    // Permutation from the OIHW (== (output features, input features, height,
    // width)) tensor format to HWIO. This is commonly used to transpose convolution
    // weights represented as OIHW format to HWIO, which is more desirable for
    // certain downstream optimization passes (e.g. XLA).
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
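    A permutation like kNchwToNhwcPermutation reorders shape dimensions: entry
    i of the result takes dimension perm[i] of the source, so NCHW
    {1, 3, 224, 224} becomes NHWC {1, 224, 224, 3}. A small sketch of applying
    it (Permute is an illustrative helper, not the header's API):

      #include <array>
      #include <cstdint>
      #include <iostream>

      constexpr std::array<int64_t, 4> kNchwToNhwcPermutation = {0, 2, 3, 1};

      // Result dimension i takes source dimension perm[i].
      std::array<int64_t, 4> Permute(const std::array<int64_t, 4>& shape,
                                     const std::array<int64_t, 4>& perm) {
        std::array<int64_t, 4> out{};
        for (int i = 0; i < 4; ++i) out[i] = shape[perm[i]];
        return out;
      }

      int main() {
        auto nhwc = Permute({1, 3, 224, 224}, kNchwToNhwcPermutation);
        for (auto d : nhwc) std::cout << d << " ";  // 1 224 224 3
        std::cout << "\n";
      }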