- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 138 for REWRITES (0.29 sec)
-
src/cmd/fix/main.go
"os" "path/filepath" "sort" "strings" "cmd/internal/telemetry" ) var ( fset = token.NewFileSet() exitCode = 0 ) var allowedRewrites = flag.String("r", "", "restrict the rewrites to this comma-separated list") var forceRewrites = flag.String("force", "", "force these fixes to run even if the code looks updated") var allowed, force map[string]bool var (
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 14 19:41:17 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/tensorlist_patterns.td
include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td" //===----------------------------------------------------------------------===// // TensorList transformation patterns. // Note that the pattern below rewrites `TensorList` tensors (which have type DT_VARIANT) // into regular tensors. We also assume that each element in the `TensorList` has // the same constant shape.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 16 23:20:46 UTC 2022 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/passes.h
RewritePatternSet &patterns); // Decompose ops. std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass( std::optional<ModuleOp> tfr_module = std::nullopt); // Rewrites quantized operands and results with their storage types. // This pass should be run at module level after decomposition, if there are // quantized operands or results.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc
See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Rewrites ops that require quantized inputs or outputs to ops that allow // non-quantized inputs and outputs. #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h
#include <memory> #include "llvm/ADT/StringRef.h" #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project namespace mlir { namespace TFTPU { // Creates a pass that rewrites `tf_device.launch_func` on TPUs into TPU runtime // ops. std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateTPURewritePass( llvm::StringRef module_name = llvm::StringRef());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 10 18:58:57 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.h
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { // An OpKernel that compiles an op to an XLA computation and runs it. Unlike // XlaLaunch this doesn't rely on any rewrites of the graphdef - it will run a // vanilla TensorFlow op as long as the bridge supports it. class XlaCompileOnDemandOp : public OpKernel { public: explicit XlaCompileOnDemandOp(OpKernelConstruction* ctx)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/convert_launch_func_to_tf_call.cc
namespace mlir { namespace TFDevice { namespace { #define GEN_PASS_DEF_CONVERTLAUNCHFUNCTOTFCALLPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" // Rewrites tf_device::LaunchFuncOp into TF::PartitionedCallOp. struct ConvertLaunchFuncToTFCallPass : public impl::ConvertLaunchFuncToTFCallPassBase< ConvertLaunchFuncToTFCallPass> { void runOnOperation() override; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 31 21:08:09 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.cc
pm.addPass(mlir::createInlinerPass()); pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass()); pm.addPass(mlir::quant::CreateCastBf16OpsToF32Pass()); // Optimizes the graph via cleanups, merges, rewrites, constant folding, // and edge case handling where possible. pm.addNestedPass<mlir::func::FuncOp>( mlir::TF::CreateDropWhileShapeInvariantPass()); pm.addNestedPass<mlir::func::FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
FindUserOfType<TFL::QuantizeOp>(op) != nullptr); } }; // Rewrites `stablehlo.convolution` into fused `tfl.conv_2d`. // If available, fuse bias and activation adjacent to `stablehlo.convolution`. // This RewritePattern rewrites both the following into `tfl.conv_2d` op: // // StableHLO Quantizer output: // * input: per-tensor qi8
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/jit/jit_compilation_pass_registration.cc
#include "tensorflow/compiler/jit/report_clustering_info_pass.h" #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { // PRE_PLACEMENT passes: // EncapsulateXlaComputationsPass rewrites computations generated by the // xla.compile() Python code into XlaLaunch nodes. REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 36, EncapsulateXlaComputationsPass); // from
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 11 21:53:08 UTC 2023 - 3.8K bytes - Viewed (0)