Results 1 - 10 of 12 for func_ops (0.2 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc

                it->second.data_type;
          }
        }
      }
      func_op.eraseArguments(indices_to_erase);
      func_op.setType(
          FunctionType::get(func_op.getContext(), new_types,
                            llvm::to_vector<4>(return_op->getOperandTypes())));
    }
    
    // Lifts reads/writes of resource arguments from func_op and changes its
    // signature. resource_data_types is the (index, data type) pair for each
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 55.1K bytes
    - Viewed (0)
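
The resource_op_lifting.cc excerpt ends with the standard recipe for changing a function's signature in place: erase the dead arguments, then rebuild the FunctionType from the surviving argument types and the terminator's current operand types. A minimal sketch of that last step, assuming `func_op` is an existing single-block func::FuncOp and `new_input_types` is a hypothetical list of the remaining argument types:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "llvm/ADT/SmallVector.h"

    void UpdateSignature(mlir::func::FuncOp func_op,
                         llvm::ArrayRef<mlir::Type> new_input_types) {
      // Result types are taken from whatever the terminator currently returns.
      auto return_op = llvm::cast<mlir::func::ReturnOp>(
          func_op.getBody().front().getTerminator());
      func_op.setType(mlir::FunctionType::get(
          func_op.getContext(), new_input_types,
          llvm::to_vector<4>(return_op->getOperandTypes())));
    }
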
  2. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc

      auto out_types = GetValueTypes(outputs);
      builder.setInsertionPointToEnd(&module.getBodyRegion().back());
      auto func_op = builder.create<func::FuncOp>(
          module.getLoc(), name, builder.getFunctionType(in_types, out_types));
      func_op.setPrivate();
      symbol_table.insert(func_op);
      return func_op;
    }
    
    TF::StatefulPartitionedCallOp EncapsulateOpsInFunc(
        OpBuilder& builder, SymbolTable& symbol_table,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 92.9K bytes
    - Viewed (0)
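
The embedding_pipelining.cc excerpt follows a common pattern: build a FunctionType, create a private func::FuncOp at the end of the module, and register it in the SymbolTable. A rough free-standing sketch of that pattern; `CreatePrivateFunc` and its parameters are hypothetical names, and the module and type lists are assumed to exist:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/SymbolTable.h"

    // Creates a private function `name` with the given signature at the end of
    // the module and registers it with the module's symbol table.
    mlir::func::FuncOp CreatePrivateFunc(mlir::ModuleOp module,
                                         mlir::SymbolTable &symbol_table,
                                         llvm::StringRef name,
                                         mlir::TypeRange in_types,
                                         mlir::TypeRange out_types) {
      mlir::OpBuilder builder(module.getContext());
      builder.setInsertionPointToEnd(module.getBody());
      auto func_op = builder.create<mlir::func::FuncOp>(
          module.getLoc(), name, builder.getFunctionType(in_types, out_types));
      func_op.setPrivate();            // not visible outside the module
      symbol_table.insert(func_op);    // uniquifies the symbol name if needed
      return func_op;
    }
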
  3. tensorflow/c/c_api_function_test.cc

      // Use, run, and verify
      TF_Operation* two = ScalarConst(2, host_graph_, s_);
      TF_Operation* func_feed = Placeholder(host_graph_, s_);
      TF_Operation* func_op = Use({two, func_feed});
      Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {3, 2});
      VerifyFDef(empty_, M({{"feed1_0"}, {"feed2_0"}}), M({{"feed2"}, {"feed1"}}),
                 {{"feed1_0", "feed1"}, {"feed2_0", "feed2"}}, {});
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 20 22:08:54 UTC 2023
    - 63.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

            /*is_hybrid=*/is_hybrid);
    
        const mlir::func::FuncOp quantized_func = dyn_cast_or_null<func::FuncOp>(
            symbol_table.lookup(quantized_function_name));
        if (quantized_func == nullptr) {
          call_op->emitError("Failed to find the quantized function: " +
                             quantized_function_name);
          return failure();
        }
        mlir::func::FuncOp new_quantized_func =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
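
Result 4 shows the usual defensive symbol lookup: resolve a function by name through a SymbolTable and emit a diagnostic if it is missing or of the wrong kind. A hedged sketch of that pattern, with `ResolveCallee` and its parameters as placeholder names standing in for `call_op` and `quantized_function_name` from the excerpt:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/SymbolTable.h"
    #include "mlir/Support/LogicalResult.h"

    mlir::LogicalResult ResolveCallee(mlir::SymbolTable &symbol_table,
                                      mlir::Operation *call_op,
                                      llvm::StringRef callee_name) {
      // lookup() returns a generic Operation*; dyn_cast_or_null guards both
      // against a missing symbol and against a symbol of the wrong kind.
      auto callee = llvm::dyn_cast_or_null<mlir::func::FuncOp>(
          symbol_table.lookup(callee_name));
      if (!callee) {
        call_op->emitError("failed to find the quantized function: ")
            << callee_name;
        return mlir::failure();
      }
      return mlir::success();
    }
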
  5. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

            queue.push_back(op);
          }
        }
      }
    
      return visited;
    }
    
    // We want to adjust the func op according to some cross ops information.
    static StatusOr<FuncOp> PostProcessFuncOp(FuncOp func) {
      OpBuilder builder(func);
      // When a quantized constant is imported, its quantization parameter is set
      // to be narrow range. Here revert to be the fully range if the user doesn't
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
    - Viewed (0)
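
PostProcessFuncOp above rewrites quantized constants inside an imported function. The mechanism it relies on is a walk over the function body with an OpBuilder anchored at the function; a minimal, generic sketch of that mechanism only (the narrow-range quantization fix itself is specific to the TFLite importer and is not reproduced here):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"

    void PostProcess(mlir::func::FuncOp func) {
      mlir::OpBuilder builder(func);
      // Visit every operation nested under the function and decide per op
      // whether it needs to be rewritten.
      func.walk([&](mlir::Operation *op) {
        builder.setInsertionPoint(op);
        // ... inspect `op` and create replacement ops here ...
      });
    }
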
  6. tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc

        then_branch_op.setVisibility(func::FuncOp::Visibility::Private);
    
        // Constructs `else_branch`, which is executed when `if_cond` evaluates to
        // false.
        auto else_branch_op =
            rewriter.create<func::FuncOp>(loc, "cond_false", func_type);
        CreateCondFalseBranch(loc, shape_dtype, result_type, else_branch_op,
                              &rewriter);
        else_branch_op.setVisibility(func::FuncOp::Visibility::Private);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 70.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

        func::FuncOp ResolveThenFunction(::mlir::SymbolTableCollection* table) {
          if (table)
            return table->lookupNearestSymbolFrom<func::FuncOp>(*this, getThenBranchAttr());
          return SymbolTable::lookupNearestSymbolFrom<func::FuncOp>(
            *this, getThenBranchAttr());
        }
        // TODO(b/204997177): Deprecate and remove.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
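
The tf_ops.td snippet resolves the `then_branch` symbol to its func::FuncOp, preferring a caller-provided SymbolTableCollection (which caches symbol tables across queries) over the slower static lookup. A rough free-standing equivalent, assuming `op` carries a symbol reference in `callee_attr` (both hypothetical names):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/SymbolTable.h"

    mlir::func::FuncOp ResolveCallee(mlir::Operation *op,
                                     mlir::SymbolRefAttr callee_attr,
                                     mlir::SymbolTableCollection *tables) {
      // With a SymbolTableCollection the per-module symbol tables are reused;
      // without one, each call rebuilds the table it needs.
      if (tables)
        return tables->lookupNearestSymbolFrom<mlir::func::FuncOp>(op,
                                                                   callee_attr);
      return mlir::SymbolTable::lookupNearestSymbolFrom<mlir::func::FuncOp>(
          op, callee_attr);
    }
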
  8. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc

      output_types.reserve(outputs.size());
      for (Value v : outputs) output_types.emplace_back(v.getType());
    
      auto func_type = builder->getFunctionType(operand_types, output_types);
    
      FuncOp outlined_func =
          FuncOp::create(ops.front()->getLoc(), kHostFunctionAttr, func_type);
    
      // Create function body.
      Block* outlined_func_block = outlined_func.addEntryBlock();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 68.3K bytes
    - Viewed (0)
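
Result 8 assembles a host function from scratch: collect operand and result types, build a FunctionType, create a detached FuncOp, and add an entry block to hold the outlined body. A compact sketch of those steps under assumed names; the returned function still has to be filled in, terminated, and inserted into a module afterwards:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"

    mlir::func::FuncOp OutlineEmptyFunc(mlir::OpBuilder &builder,
                                        mlir::Location loc,
                                        llvm::StringRef name,
                                        mlir::TypeRange operand_types,
                                        mlir::TypeRange result_types) {
      auto func_type = builder.getFunctionType(operand_types, result_types);
      // Creates a FuncOp that is not yet attached to any module.
      auto outlined_func = mlir::func::FuncOp::create(loc, name, func_type);
      // addEntryBlock() gives the function a body whose block arguments
      // mirror the function's input types.
      mlir::Block *entry = outlined_func.addEntryBlock();
      builder.setInsertionPointToEnd(entry);
      // The caller now moves or clones the outlined ops into `entry` and
      // terminates the block with a func::ReturnOp.
      return outlined_func;
    }
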
  9. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

    // TF2XLA ops aren't supported by later stages.
    LogicalResult ConvertTf2XlaOps(func::FuncOp func, MLIRContext *context) {
      ConversionTarget target(*context);
      target.addLegalDialect<arith::ArithDialect>();
      target.addLegalDialect<func::FuncDialect>();
      target.addLegalDialect<TF::TensorFlowDialect>();
      target.addLegalOp<ModuleOp>();
      target.addLegalOp<func::FuncOp>();
      target.addIllegalOp<TF::XlaConvV2Op>();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
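
The prepare_tf.cc excerpt configures a ConversionTarget that keeps most dialects legal and singles out specific TF2XLA ops as illegal, so only those ops must be rewritten. A minimal, generic dialect-conversion sketch following that shape; the pattern list is left empty and the illegal-op line is shown only as a commented illustration:

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Transforms/DialectConversion.h"

    mlir::LogicalResult LegalizeFunc(mlir::func::FuncOp func,
                                     mlir::MLIRContext *context) {
      mlir::ConversionTarget target(*context);
      target.addLegalDialect<mlir::arith::ArithDialect>();
      target.addLegalDialect<mlir::func::FuncDialect>();
      target.addLegalOp<mlir::ModuleOp>();
      target.addLegalOp<mlir::func::FuncOp>();
      // Ops marked illegal must be rewritten by one of the supplied patterns;
      // everything else is left untouched by the partial conversion, e.g.:
      // target.addIllegalOp<TF::XlaConvV2Op>();   // as in the excerpt above

      mlir::RewritePatternSet patterns(context);
      // ... populate `patterns` with rewrites for the illegal ops ...
      return mlir::applyPartialConversion(func, target, std::move(patterns));
    }
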
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

    #include <cstdint>
    #include <memory>
    #include <utility>
    
    #include "absl/algorithm/container.h"
    #include "llvm/ADT/Sequence.h"
    #include "llvm/Support/Debug.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    #include "mlir/Dialect/Quant/QuantOps.h"  // from @llvm-project  // NOLINT: Required to register quantization dialect.
    #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)