
Results 51 - 60 of 64 for Bias (0.04 sec)

  1. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

        }
    
        // If all types are floating point, or all are quantized or integer,
        // the types are consistent. Int is valid in combination with both
        // quantized and floating point; this occurs when doing qi16
        // convolution, as the bias is passed as a non-quantized int64.
        if (allTypesFp || allTypesQuantizedOrInt) return failure();
    
        Location loc = op->getLoc();
        SmallVector<Value> newOperands;
        newOperands.reserve(op->getNumOperands());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
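
As an aside on the snippet above: the two flags summarize the element types of the op's operands and results. A minimal standalone sketch of that consistency check, with an assumed `Kind` enum standing in for the real MLIR types (not the pass's actual code):

    #include <vector>

    // Assumed stand-in for the float / quantized / integer element types
    // the pass actually inspects.
    enum class Kind { kFloat, kQuantized, kInt };

    // The pattern bails out (returns failure) when the types are already
    // consistent: all floating point, or all quantized/integer.
    bool TypesConsistent(const std::vector<Kind>& kinds) {
      bool all_fp = true;
      bool all_quantized_or_int = true;
      for (Kind k : kinds) {
        if (k != Kind::kFloat) all_fp = false;
        if (k != Kind::kQuantized && k != Kind::kInt)
          all_quantized_or_int = false;
      }
      return all_fp || all_quantized_or_int;
    }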
  2. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h

                                             ArrayRef<Value> results);
    
    // Appends the second argument to the first, which is expected to be an
    // argument list.
    // Used to attach a bias to an einsum argument list.
    SmallVector<Value> AppendToVector(ArrayRef<Value> arguments, Value append);
    
    // Checks if the `Method` attached to the given `tf.XlaCallModule` op has
    // `WeightOnlyPtq`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 5.5K bytes
    - Viewed (0)
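
Given the declaration and comment above, a plausible implementation sketch of `AppendToVector`, assuming only `llvm::SmallVector` and `mlir::Value` (not necessarily the repository's exact code):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/Value.h"

    // Copies the existing argument list and appends one extra operand
    // (e.g. a bias) at the end.
    llvm::SmallVector<mlir::Value> AppendToVector(
        llvm::ArrayRef<mlir::Value> arguments, mlir::Value append) {
      llvm::SmallVector<mlir::Value> result(arguments.begin(), arguments.end());
      result.push_back(append);
      return result;
    }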
  3. tensorflow/compiler/mlir/lite/quantization/tools/op_quant_spec_getters_gen.cc

                     << "<" << op.getQualCppClassName()
                     << ">::GetResultQuantizedType(i));\n";
              matches.clear();
            }
            // If there is an "AccumulatorUniformScale" trait, set the type for the bias.
            if (acc_uniform_trait_regex.match(trait_str, &matches)) {
              OUT(4) << "spec->biases_params.emplace(std::make_pair(" << matches[1]
                     << ", std::make_pair(tfl.GetAllNonBiasOperands(),"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 15 11:18:44 UTC 2024
    - 4.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/lift_variables_test_pass.h

                 const tensorflow::thread::ThreadPoolOptions& thread_pool_options)
          override {
        for (const std::string& output_name : output_names) {
          Tensor output;
          if (output_name == "dense/bias") {
            Tensor t = Tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({50}));
            t.flat<float>().setZero();
            outputs->push_back(t);
          } else if (output_name == "dense/kernel") {
            Tensor t =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 21 15:49:06 UTC 2022
    - 5.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_op_interfaces.h

      // Name of the output kernel implementing the contraction fusion.
      std::string output_kernel;
    
      // Indices of additional arguments that will be forwarded to the fused
      // operation (e.g. forward bias vector if fusing BiasAdd operation).
      SmallVector<int, 4> additional_arguments;
    
      // Additional attributes to add to the fused node.
      SmallVector<NamedAttribute, 4> additional_attributes;
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 03 19:26:14 UTC 2023
    - 6.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/utils/fake_session.cc

        const tensorflow::thread::ThreadPoolOptions& thread_pool_options) {
      Initialize();
      for (const std::string& output_name : output_names) {
        Tensor output;
        if (output_name == "dense/bias") {
          Tensor t = Tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({50}));
          t.flat<float>().setZero();
          outputs->push_back(t);
        } else if (output_name == "dense/kernel") {
          Tensor t =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Feb 26 03:47:51 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

          }
    
          // Rewire the outputs.
          result.replaceAllUsesWith(new_result);
        }
    
        // Remove the old op.
        op->erase();
      });
    }
    
    // Fold quantized i32 constants (normally biases) into their float values.
    struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> {
      using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
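
The fold described by the comment boils down to per-element affine dequantization, real = scale * (quantized - zero_point), where the scale and zero point come from the i32 tensor's quantized type. A standalone sketch of that arithmetic (plain C++ with a hypothetical helper name, not the MLIR rewrite pattern itself):

    #include <cstdint>
    #include <vector>

    // Per-element affine dequantization of a quantized i32 bias:
    // real = scale * (quantized - zero_point).
    std::vector<float> DequantizeI32(const std::vector<int32_t>& quantized,
                                     double scale, int64_t zero_point) {
      std::vector<float> reals;
      reals.reserve(quantized.size());
      for (int32_t q : quantized)
        reals.push_back(static_cast<float>(scale * (q - zero_point)));
      return reals;
    }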
  8. tensorflow/compiler/mlir/quantization/tensorflow/gen_quantized_function_library.py

      s = s.replace('2D', '2d').replace('3D', '3d')
      snake_case = ''.join(['_' + i.lower() if i.isupper() else i for i in s
                           ]).lstrip('_')
      return snake_case.replace('mat_mul', 'matmul').replace('bias_add', 'bias')
    
    
    def _substitute_impl_function_name_template(module: str) -> str:
      """Generates the op-specific implementation function name."""
      compiled_regex = re.compile(r'GenerateImplFunctionName\(([\w\s]+)\)')
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 20 01:38:06 UTC 2022
    - 8.4K bytes
    - Viewed (0)
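
To make the mapping concrete: the helper turns op names like 'Conv2D' into 'conv2d', 'MatMul' into 'matmul', and 'BiasAdd' into 'bias'. A C++ re-expression of the same transformation (an illustration with a hypothetical function name, not project code):

    #include <cctype>
    #include <iostream>
    #include <string>

    // Lower the D in 2D/3D, insert '_' before interior capitals and
    // lowercase them, then apply the generator's two special cases.
    std::string ToSnakeCase(std::string s) {
      for (size_t i = 0; i + 1 < s.size(); ++i)
        if ((s[i] == '2' || s[i] == '3') && s[i + 1] == 'D') s[i + 1] = 'd';
      std::string out;
      for (char c : s) {
        if (std::isupper(static_cast<unsigned char>(c))) {
          if (!out.empty()) out += '_';
          out += static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
        } else {
          out += c;
        }
      }
      auto replace_all = [](std::string& str, const std::string& from,
                            const std::string& to) {
        for (size_t pos = 0; (pos = str.find(from, pos)) != std::string::npos;
             pos += to.size())
          str.replace(pos, from.size(), to);
      };
      replace_all(out, "mat_mul", "matmul");  // 'mat_mul' -> 'matmul'
      replace_all(out, "bias_add", "bias");   // 'bias_add' -> 'bias'
      return out;
    }

    int main() {
      std::cout << ToSnakeCase("Conv2D") << "\n";   // conv2d
      std::cout << ToSnakeCase("BiasAdd") << "\n";  // bias
    }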
  9. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        auto fc_op = rewriter.create<TFL::FullyConnectedOp>(
            bmm_op->getLoc(), ArrayRef<Type>{output_type},
            /*input=*/output_lhs, /*filter=*/output_rhs, /*bias=*/no_input,
            /*fused_activation_function=*/rewriter.getStringAttr("NONE"),
            /*weights_format=*/rewriter.getStringAttr("DEFAULT"),
            /*keep_num_dims=*/rewriter.getBoolAttr(true),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

        // optimizations in the pipeline.
        METHOD_NO_QUANTIZE = 1;
    
        // Static range quantization. Quantized tensor values' ranges are
        // statically determined. Activations and weights are quantized to
        // INT8, while the bias is quantized to INT32.
        METHOD_STATIC_RANGE_INT8 = 2;
    
        // Dynamic range quantization. Quantized tensor values' ranges are
        // determined during graph execution. The weights are quantized during
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
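
The INT32 bias mentioned above follows the standard convention for INT8 static-range schemes: the bias scale is the product of the input and weight scales, with zero point 0, so the bias adds directly onto the INT8 x INT8 accumulator. A minimal sketch of that convention (an illustration with a hypothetical helper name, not code from this repository):

    #include <cmath>
    #include <cstdint>

    // Conventionally bias_scale = input_scale * weight_scale and the bias
    // zero point is 0, matching the scale of the INT8 x INT8 accumulator.
    int32_t QuantizeBias(float bias, float input_scale, float weight_scale) {
      const float bias_scale = input_scale * weight_scale;
      return static_cast<int32_t>(std::lround(bias / bias_scale));
    }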