Results 11 - 20 of 25 for fusing (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

        // Safeguard check to ensure that there is at least one quantizable op.
        if (failed(candidate_ops) || candidate_ops->empty()) return failure();
    
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* candidate_op : *candidate_ops) {
          // If it is a requantize op, we shouldn't rewrite this op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    std::unique_ptr<OperationPass<ModuleOp>>
    CreateLiftQuantizableSpotsAsFunctionsPass(
        const tensorflow::quantization::QuantizationOptions& quant_options);
    
    // Apply graph optimizations such as fusing and constant folding to prepare
    // for lifting.
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareLiftingPass(
        tensorflow::quantization::OpSet target_opset);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

                data_format='NHWC',
                name='sample/conv',
            )
            if bias_fn is not None:
              out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
              # Fusing is supported for the non-training case.
              out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                  out, scale, offset, mean, variance, is_training=False
              )
            if activation_fn is not None:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

        auto users = op.getResult().getUsers();
        quantizing_ops.append(users.begin(), users.end());
    
        bool changed = false;
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* quantizing_op : quantizing_ops) {
          // If it is a requantize op, we shouldn't rewrite this op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

                padding='SAME',
                data_format='NHWC',
            )
            if has_bias:
              out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
              # Fusing is supported for the non-training case.
              out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                  out, scale, offset, mean, variance, is_training=False
              )
            if activation_fn is not None:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc

          // Found an operand that isn't scheduled yet, return true.
          return true;
        }
      }
      return false;
    }
    
    // Sorts the operations in the provided range to enforce dominance.
    // This is useful after fusing / reorganizing Operations in a block and later
    // needing to readjust the ordering to ensure dominance.
    LogicalResult SortTopologically(Block::iterator begin, Block::iterator end) {
      Block* block = begin->getBlock();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 27.6K bytes
    - Viewed (0)
  7. architecture/networking/pilot.md

    # Architecture of Istiod
    
    This document describes the high level architecture of the Istio control plane, Istiod.
    Istiod is structured as a modular monolith, housing a wide range of functionality: certificate signing, proxy configuration (XDS), traditional Kubernetes controllers, and more.
    
    ## Proxy Configuration
    
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed Feb 07 17:53:24 UTC 2024
    - 19.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

            $filter_width, $filter_height, ActFnAttr),
        [(HasOneUse $pool_out)]>;
    }
    
    // TODO(hinsu): Also fuse ops corresponding to SIGN_BIT fused
    // activation functions.
    // Currently we're not fusing tanh, sigmoid, hard_swish and other activations
    // that cannot be simply translated into clamping.
    foreach actFnPair = [[TFL_ReluOp, TFL_AF_Relu],
                         [TFL_Relu6Op, TFL_AF_Relu6],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc

        // If the corresponding result of the cluster op is used in some resource
        // update op, do not eliminate the result. Such assignment ops could be for
        // device resources and are required during fusing of the execute op and
        // the resource update ops.
        bool is_used_for_resource_write = llvm::any_of(
            op.getResult(operand.getOperandNumber()).getUsers(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

          }
        }
        return success();
      }
    
      void rewrite(quantfork::DequantizeCastOp op,
                   PatternRewriter& rewriter) const final {
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* op_with_region : op.getResult().getUsers()) {
          // Collect all the quantized inputs and "clone" the matched op by these
          // inputs.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
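
A minimal, self-contained sketch of the Conv2D -> BiasAdd -> FusedBatchNorm (is_training=False) chain that the test snippets in results 3 and 5 construct. The shapes and variable names below are hypothetical, and the public tf.nn / tf.compat.v1.nn APIs stand in for the tests' internal nn_ops imports:

    import tensorflow as tf

    # Hypothetical shapes, chosen only to make the sketch runnable.
    x = tf.random.normal([1, 8, 8, 3])          # NHWC input
    filters = tf.random.normal([2, 2, 3, 4])    # HWIO convolution filter
    bias = tf.zeros([4])
    scale, offset = tf.ones([4]), tf.zeros([4])
    mean, variance = tf.zeros([4]), tf.ones([4])

    # Conv2D -> BiasAdd -> FusedBatchNorm (inference mode) -> activation:
    # the op chain the quantization tests build so the prepare/lifting
    # passes have something to fuse.
    out = tf.nn.conv2d(x, filters, strides=1, padding='SAME', data_format='NHWC')
    out = tf.nn.bias_add(out, bias)
    out, _, _ = tf.compat.v1.nn.fused_batch_norm(
        out, scale, offset, mean=mean, variance=variance, is_training=False
    )
    out = tf.nn.relu(out)  # stands in for the tests' optional activation_fn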
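
Similarly, a rough Python sketch of the idea behind SortTopologically in result 6: reorder a list of operations so every operation appears after the operations that produce its operands (Kahn's algorithm). The operands_of callback is a hypothetical stand-in for walking an op's operands:

    from collections import defaultdict, deque

    def sort_topologically(ops, operands_of):
        """Reorder `ops` so each op comes after the ops producing its
        operands, i.e. the resulting order respects dominance."""
        op_set = set(ops)
        indegree = {op: 0 for op in ops}
        users = defaultdict(list)
        for op in ops:
            for producer in operands_of(op):
                if producer in op_set:
                    indegree[op] += 1
                    users[producer].append(op)
        ready = deque(op for op in ops if indegree[op] == 0)
        ordered = []
        while ready:
            op = ready.popleft()
            ordered.append(op)
            for user in users[op]:
                indegree[user] -= 1
                if indegree[user] == 0:
                    ready.append(user)
        return ordered  # shorter than `ops` only if the dependencies had a cycle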