Results 11 - 20 of 21 for dequantize_i8 (0.25 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

    // CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[maxpool]]
    // CHECK-SAME: f = @dequantize_i8
    // CHECK: return %[[dequantize]]
    
    // CHECK: -------- Quantization Summary --------
    // CHECK: Number of quantized layers in the model
    // CHECK: --------------------------------
    // CHECK: Name    Count/Total
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

    using ::tensorflow::quantization::OpSet;
    
    constexpr absl::string_view kQuantizeCompositeFunctionsStepName =
        "_quantize_composite_functions";
    constexpr StringRef kQuantizeFuncName = "quantize_i8";
    constexpr StringRef kDequantizeFuncName = "dequantize_i8";
    constexpr StringRef kAttrMapAttribute = "attr_map";
    constexpr StringRef kQuantizedOpsAttribute = "tf_quant.quantized_ops";
    constexpr StringRef kCompositeFuncPrefix = "composite_";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

        %round = "tf.Round"(%clamp_min) : (tensor<*xf32>) -> tensor<*xf32>
        %i8 = "tf.Cast"(%round) : (tensor<*xf32>) -> tensor<*xi8>
        func.return %i8 : tensor<*xi8>
      }
    
      func.func @dequantize_i8(%input : tensor<*xi8>, %scale : tensor<*xf32>, %zp : tensor<*xi32>) -> tensor<*xf32> {
        // Use identity op to avoid the weight being constant-folded.
        %identity = "tf.Identity"(%input) : (tensor<*xi8>) -> tensor<*xi8>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
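
The snippet above is truncated before the body of @dequantize_i8. For orientation, here is a minimal plain-C++ sketch of the per-tensor int8 affine quantize/dequantize arithmetic that functions like @quantize_i8 and @dequantize_i8 implement; the scalar helpers, parameter values, and clamping range below are illustrative assumptions, not code from quantized_function_library.mlir.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Illustrative scalar stand-in: q = clamp(round(x / scale) + zero_point, -128, 127).
    int8_t quantize_i8(float x, float scale, int32_t zero_point) {
      const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp(q, int32_t{-128}, int32_t{127}));
    }

    // Illustrative scalar stand-in: x ~= (q - zero_point) * scale.
    float dequantize_i8(int8_t q, float scale, int32_t zero_point) {
      return (static_cast<int32_t>(q) - zero_point) * scale;
    }

    int main() {
      const float scale = 0.05f;
      const int32_t zero_point = 3;
      const float x = 1.234f;
      const int8_t q = quantize_i8(x, scale, zero_point);
      std::printf("x=%f  q=%d  dequantized=%f\n", x, q,
                  dequantize_i8(q, scale, zero_point));
      return 0;
    }
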
  4. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

          : OpRewritePattern<DequantizeOp>(context) {}
    
      LogicalResult matchAndRewrite(DequantizeOp dequantize_op,
                                    PatternRewriter& rewriter) const override {
        if (!dequantize_op->hasOneUse()) return failure();
    
        auto use = dequantize_op->use_begin();
        Operation* passthrough_op = use->getOwner();
        unsigned operand_index = use->getOperandNumber();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
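
The matchAndRewrite above (from optimize_op_order.cc) only fires when the dequantize has a single user, and then pushes the dequantize below "passthrough" ops that merely rearrange values, so those ops run on the narrow int8 data. A minimal sketch of why that reordering preserves results, using a 2-D transpose as an assumed stand-in for a passthrough op (the helpers below are illustrative, not TFLite code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Per-tensor dequantization of an int8 buffer (illustrative helper).
    std::vector<float> dequantize(const std::vector<int8_t>& q, float scale,
                                  int32_t zero_point) {
      std::vector<float> out;
      out.reserve(q.size());
      for (int8_t v : q)
        out.push_back((static_cast<int32_t>(v) - zero_point) * scale);
      return out;
    }

    // A passthrough op only moves values around; here, a row-major 2-D transpose.
    template <typename T>
    std::vector<T> transpose(const std::vector<T>& in, int rows, int cols) {
      std::vector<T> out(in.size());
      for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c) out[c * rows + r] = in[r * cols + c];
      return out;
    }

    int main() {
      const std::vector<int8_t> q = {1, 2, 3, 4, 5, 6};  // 2x3, row-major
      const float scale = 0.5f;
      const int32_t zp = 1;

      // dequantize -> transpose (original order) vs. transpose -> dequantize
      // (after the pattern fires). Both produce the same float tensor, but the
      // second variant moves 1-byte elements instead of 4-byte floats.
      auto a = transpose(dequantize(q, scale, zp), 2, 3);
      auto b = dequantize(transpose(q, 2, 3), scale, zp);
      assert(a == b);
      return 0;
    }
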
  5. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

            output_type.print(llvm::errs() << "Requested output type ");
            dequantize_op.emitError(" Couldn't be modified to the requested type.");
            return failure();
          }
          new_output_types[i] = returned_type;
          terminator->setOperand(i, returned_value);
          if (dequantize_op.use_empty()) {
            dequantize_op.erase();
          }
        }
      }
      return success();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc

      quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
    
      std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
          pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
      EXPECT_TRUE(dequantize_op.has_value());
      EXPECT_EQ(dequantize_op.value().func().getName().str(),
                "composite_dequantize_uniform");
    }
    
    }  // namespace
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

          : OpRewritePattern<RootOpT>(context, /*benefit=*/300) {}
    
     private:
      // Collects all candidate ops for quantization, which are the
      // `dequantize_op`'s users.
      FailureOr<SmallVector<Operation*>> CollectCandidateOps(
          DequantizeOpT dequantize_op) const {
        auto users = dequantize_op->getResult(0).getUsers();
        return SmallVector<Operation*>(users.begin(), users.end());
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

                mlir::quant::QuantizedType::castToExpressedType(input_type);
            builder->setInsertionPoint(op);
            auto dequantize_op = builder->create<TFL::DequantizeOp>(
                op->getLoc(), dequantized_input_type, input.get());
            dequantized_inputs.push_back(dequantize_op);
          } else {
            dequantized_inputs.push_back(input.get());
          }
        }
    
        // Result types.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc

      auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func);
      EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op));
    
      auto dequantize_op =
          FindOperationOfType<quantfork::DequantizeCastOp>(test_func);
      EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op));
    }
    
    TEST_F(IsOpQuantizableStableHloTest,
           XlaCallModuleOpQuantizableWhenNotDenylisted) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

        Operation* returned_op = returned_value.getDefiningOp();
        if (returned_op && returned_op->hasOneUse() &&
            llvm::isa<DequantizeOp>(returned_op)) {
          auto dequantize_op = llvm::cast<DequantizeOp>(returned_op);
          Value dequantized_result = dequantize_op.getInput();
          output_types.push_back(dequantized_result.getType());
          terminator->setOperand(i, dequantized_result);
          returned_op->erase();
        } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)