- Sort by: Score
- Results per page: 10
- Languages: All
Results 91 - 100 of 200 for dequantize (0.22 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc
func.return %add : tensor<3x2xf32> } } )mlir"; // Quantizable ops: XlaCallModule op with "fully_quantizable" attribute and // same-scale StableHLO ops // Non-quantizable ops: quantize/dequantize ops constexpr absl::string_view kModuleCompositeSameScale = R"mlir( module { func.func @same_scale_after_composite() -> tensor<3x1xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// hardware performs better with integer ops. // Default value: true optional bool unpack_quantized_types = 1; // When set to True, requantize op in the quantized fusion will merge with the // subsequent dequantize op if present. // Default value: false // TODO: b/321729008 - re-consider default value after testing on prod model. bool merge_fusion_with_dequantize = 2; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
bool enable_legacy_weight_only = false, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 42K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
(TF_ConstOp (GetI64ScalarElementsAttr<-1>)))), (TF_SoftmaxCrossEntropyWithLogitsOp $features, $adjusted_labels)]>; //===----------------------------------------------------------------------===// // Dequantize op patterns. //===----------------------------------------------------------------------===// def DequantizeHalfRange : NativeCodeCall< "DequantizeHalfRange(&$_builder, $0)">;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
func.return %0 : tensor<8x8x8x8xf32> // CHECK-LABEL: fakeQuantArgsFalse // CHECK: "tfl.quantize"(%arg0) <{qtype = tensor<8x8x8x8x!quant.uniform<u8:f32, 0.0011764706057660721:85>>}> // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<8x8x8x8x!quant.uniform<u8:f32, 0.0011764706057660721:85>>) -> tensor<8x8x8x8xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
const mlir::TFL::PassConfig& pass_config, mlir::OpPassManager* pass_manager) { // This pass wraps all the tf.FakeQuant ops in a custom op so they are not // folded before being converted to tfl.quantize and tfl.dequantize ops. auto wrapped_ops = mlir::TFL::AllTfFakeQuantOps(); pass_manager->addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreateRaiseCustomOpsPass(wrapped_ops));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
src/image/jpeg/scan.go
} } } } if d.progressive { // Save the coefficients. d.progCoeffs[compIndex][by*mxx*hi+bx] = b // At this point, we could call reconstructBlock to dequantize and perform the // inverse DCT, to save early stages of a progressive image to the *image.YCbCr // buffers (the whole point of progressive encoding), but in Go, the jpeg.Decode
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 25 00:46:29 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize.mlir
%cst_0 = arith.constant dense<[1.0, 2.0, 3.0]> : tensor<3xf32> %w = arith.constant dense<2.0> : tensor<3x3x3x3xf32> %q = "tfl.quantize"(%w) {qtype = tensor<3x3x3x3x!quant.uniform<i8<-127:127>:f32:0,{1.0,2.0,3.0}>>} : (tensor<3x3x3x3xf32>) -> tensor<3x3x3x3x!quant.uniform<i8<-127:127>:f32:0,{1.0,2.0,3.0}>> %dq = "tfl.dequantize"(%q) : (tensor<3x3x3x3x!quant.uniform<i8<-127:127>:f32:0,{1.0,2.0,3.0}>>) -> tensor<3x3x3x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 284.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
// set of acceptable options. // LINT.IfChange enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8, FULLY_CONNECTED = 9, HASHTABLE_LOOKUP = 10, L2_NORMALIZATION = 11, L2_POOL_2D = 12, LOCAL_RESPONSE_NORMALIZATION = 13, LOGISTIC = 14,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0)