Results 1 - 10 of 10 for custom_op_map (0.44 sec)

  1. tensorflow/compiler/mlir/lite/transforms/quantize.cc

        std::string op_name = custom_op.getCustomCode().str();
        return (custom_op_map.find(op_name) == custom_op_map.end()) ? false : true;
      }
    
      static bool AllowDynamicRangeQuantizedOperand(
          Operation* quantized_op, const quant::CustomOpMap& custom_op_map) {
        // Collect the input if dynamic range quantization is on and the op supports
        // it.
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
  2. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

          std::string op_name = q.getCustomCode().str();
          if ((custom_op_map.find(op_name) == custom_op_map.end()) ||
              !custom_op_map.find(op_name)->second.no_side_effect)
            return failure();
        }
        rewriter.eraseOp(op);
        return success();
      }
      quant::CustomOpMap custom_op_map;
    };
    
    #include "tensorflow/compiler/mlir/lite/transforms/generated_post_quantize.inc"
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
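
Both excerpts above (results 1 and 2) gate their behavior on whether the op's custom code appears in the map. Below is a minimal standalone sketch of that lookup pattern; the CustomOpInfo struct here is a stand-in whose field names are taken from the excerpts, not the actual type declared in quantization_config.h:

    #include <string>
    #include <unordered_map>
    #include <vector>

    // Stand-in for the real CustomOpInfo; field names follow the excerpts
    // above (quantizable_input_indices, is_weight_only, no_side_effect).
    struct CustomOpInfo {
      std::vector<int> quantizable_input_indices;
      bool is_weight_only = false;
      bool no_side_effect = false;
    };
    using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;

    // Mirrors the quantize.cc membership test: an op is quantizable only if
    // its custom code is registered in the map.
    bool IsRegisteredCustomOp(const CustomOpMap& custom_op_map,
                              const std::string& op_name) {
      return custom_op_map.find(op_name) != custom_op_map.end();
    }

    // Mirrors the post_quantize.cc check: the op may be erased only if it is
    // registered and declared side-effect free.
    bool CanEraseCustomOp(const CustomOpMap& custom_op_map,
                          const std::string& op_name) {
      auto it = custom_op_map.find(op_name);
      return it != custom_op_map.end() && it->second.no_side_effect;
    }
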
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

      TfLiteBuiltinOpToMlir(op_denylist, mlir_op_denylist);
    
      return QuantizeWeights(
          builder, input_model, inference_type,
          /*denylisted_ops=*/mlir_op_denylist,
          /*custom_op_map=*/custom_op_map,
          /*minimum_elements_for_weights=*/weights_min_num_elements,
          /*disable_per_channel=*/!use_updated_hybrid_scheme,
          /*weight_only_quantization=*/false,
          /*legacy_float_scale=*/true);
    }
    
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.cc

                absl::StrSplit(node_specification, '-');
            for (const std::string& cur_index : indices) {
              custom_op_map[node_name].quantizable_input_indices.push_back(
                  std::stoi(cur_index));
            }
            break;
          }
          case CustomOpUpdateOptions::kWeightOnly:
            custom_op_map[node_name].is_weight_only =
                GetBooleanSpecs(node_specification);
            break;
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.9K bytes
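
The quantization_config.cc excerpt above splits a node specification on '-' and converts each token into a quantizable input index. A standalone sketch of just that step follows; the example spec string "1-2-3" and the helper name are hypothetical, and the real flag syntax may carry additional fields:

    #include <sstream>
    #include <string>
    #include <vector>

    // Splits a hypothetical spec such as "1-2-3" into quantizable input
    // indices, mirroring the absl::StrSplit + std::stoi loop shown above.
    std::vector<int> ParseQuantizableInputIndices(
        const std::string& node_specification) {
      std::vector<int> indices;
      std::stringstream stream(node_specification);
      std::string cur_index;
      while (std::getline(stream, cur_index, '-')) {
        indices.push_back(std::stoi(cur_index));
      }
      return indices;
    }
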
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    TfLiteStatus QuantizeWeights(
        flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model,
        const tflite::TensorType& inference_type,
        const absl::flat_hash_set<std::string>& denylisted_ops,
        const CustomOpMap& custom_op_map,
        int64_t minimum_elements_for_weights = 1024,
        bool disable_per_channel = false, bool weight_only_quantization = false,
        bool legacy_float_scale = false);
    
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

      // be quantized.
      CustomOpMap custom_op_map;
      custom_op_map["CustomTestOp"] = {
          {1},   // quantizable_input_indices
          true,  // is_weight_only
      };
    
      flatbuffers::FlatBufferBuilder builder;
      auto status = QuantizeWeights(&builder, model_, 0, custom_op_map);
      ASSERT_EQ(status, kTfLiteOk);
    
      const uint8_t* buffer = builder.GetBufferPointer();
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
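
Combining the declaration in quantize_weights.h (result 5) with the test above, a hedged end-to-end sketch might look like the following. The custom op name "MyCustomOp", the INT8 inference type, and the unqualified use of QuantizeWeights/CustomOpMap (assumed to be brought into scope as in the test file) are assumptions; only the parameter list comes from the header shown:

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h"

    // Sketch: quantize the weights of a model containing a custom op whose
    // input 1 should be treated as a weight-only quantizable tensor.
    TfLiteStatus QuantizeWithCustomOp(const tflite::Model* input_model,
                                      flatbuffers::FlatBufferBuilder& builder) {
      CustomOpMap custom_op_map;
      custom_op_map["MyCustomOp"] = {  // hypothetical custom op code
          {1},   // quantizable_input_indices
          true,  // is_weight_only
      };
      return QuantizeWeights(
          &builder, input_model,
          /*inference_type=*/tflite::TensorType_INT8,
          /*denylisted_ops=*/{},
          /*custom_op_map=*/custom_op_map,
          /*minimum_elements_for_weights=*/1024,
          /*disable_per_channel=*/false,
          /*weight_only_quantization=*/false,
          /*legacy_float_scale=*/false);
    }
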
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

      static bool IsQuantizableCustomOp(Operation* op,
                                        const CustomMap& custom_op_map) {
        return false;
      }
    
      // All the quantized ops are supported if the quantization method is dynamic
      // range quantization.
      static bool AllowDynamicRangeQuantizedOperand(
          Operation* quantized_op, const CustomMap& custom_op_map) {
        auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
        StringRef function_name =
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
  8. tensorflow/compiler/mlir/lite/transforms/passes.h

    std::unique_ptr<OperationPass<func::FuncOp>> CreatePostQuantizePass();
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePostQuantizePass(
        bool emit_quant_adaptor_ops, const quant::CustomOpMap& custom_op_map = {});
    
    // Creates an instance of the TensorFlow Lite dialect QuantizeVariables pass.
    std::unique_ptr<OperationPass<ModuleOp>> CreatePrepareQuantizeVariablesPass();
    
    - Last Modified: Thu Mar 07 21:29:34 UTC 2024
    - 10.9K bytes
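
As a rough sketch of how the factory above might be wired into a pass pipeline; the mlir::TFL namespace, the include set, and the choice of emit_quant_adaptor_ops = false are assumptions, while the signature of CreatePostQuantizePass is taken from the header shown:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h"

    // Sketch: schedule the post-quantize pass with a populated custom-op map.
    void AddPostQuantizePass(mlir::PassManager& pm,
                             const quant::CustomOpMap& custom_op_map) {
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::TFL::CreatePostQuantizePass(  // namespace assumed
              /*emit_quant_adaptor_ops=*/false, custom_op_map));
    }
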
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

                            CustomOpMap& custom_op_map);
    
    // Parses the command line flag strings to the quantization specification for
    // input arrays of a graph. The array names are not stored in the spec, and will
    // be matched by position. Returns true if failed.
    bool ParseInputNodeQuantSpecs(absl::string_view node_names,
                                  absl::string_view min_values,
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

    using RequiredSameOperandsAndResultsScaleFunc = std::function<bool(bool, int)>;
    // bool RequiredSameQuantizedAxes()
    using RequiredSameQuantizedAxesFunc = std::function<bool()>;
    
    using CustomMap = quant::CustomOpMap;
    
    // Quantization spec of an op, driving the quantization algorithm.
    struct OpQuantSpec {
      // Maps the operand index of a bias input to its quantization specifications,
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes