Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 31 - 40 of 1,825 for Auto (0.09 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

                                        const OperatorT& quant_op, int idx) {
      const auto& builtin_code =
          GetBuiltinCode(quant_model.operator_codes[quant_op.opcode_index].get());
      for (const auto& expected_op : expected_graph.operators) {
        const auto& op_code =
            expected_model.operator_codes[expected_op->opcode_index].get();
        const auto& expected_code = GetBuiltinCode(op_code);
        if (expected_code == builtin_code) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc

      auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
      auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
      auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
      auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
      auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
      auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 16 18:03:15 UTC 2023
    - 14.7K bytes
    - Viewed (0)
  3. src/cmd/go/testdata/script/mod_vendor_auto.txt

    import _ "example.com/printversion"
    -- $WORK/auto/auto.go --
    package auto
    -- $WORK/auto/replacement-version/go.mod --
    module example.com/version
    -- $WORK/auto/replacement-version/version.go --
    package version
    
    const V = "v1.0.0-replaced"
    -- $WORK/modules-1.14.txt --
    # example.com/printversion v1.0.0
    ## explicit
    example.com/printversion
    # example.com/version v1.0.0 => ./replacement-version
    example.com/version
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 07 15:21:14 UTC 2024
    - 9.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc

        Block& block, const llvm::SmallDenseMap<Value, SizeInfo>& buffer_to_size) {
      auto old_terminator = block.getTerminator();
      auto new_outputs = llvm::to_vector<8>(old_terminator->getOperands());
      llvm::SmallVector<std::tuple<int64_t, int64_t, bool>, 8>
          output_buffer_to_size;
      for (auto retval : llvm::enumerate(old_terminator->getOperands())) {
        auto it = buffer_to_size.find(retval.value());
        if (it == buffer_to_size.end()) continue;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 39.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc

      auto loop_operands_n = UnpackResults(loop_operands_indexes_i);
      auto forward_res_nm2 = UnpackResults(forward_res_indexes_im2);
      auto forward_res_nm1 = UnpackResults(forward_res_indexes_im1);
      auto core_tpu_res_nm2 = UnpackResults(core_tpu_res_indexes_im2);
      auto non_tpu_res_nm1 = UnpackResults(non_tpu_res_indexes_im1);
      auto C_nm2 = new_while_op->getResult(C_index_im2);
      auto C_nm1 = new_while_op->getResult(C_index_im1);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 92.9K bytes
    - Viewed (0)
  6. tensorflow/cc/gradients/math_grad.cc

      }
    
      auto c = op.input(0);
      auto x = op.input(1);
      auto y = op.input(2);
    
      auto zeros = ZerosLike(scope, grad_inputs[0]);
      auto gx = SelectV2(scope, c, grad_inputs[0], zeros);
      auto x_shape = Shape(scope, x);
      auto output_shape = Shape(scope, op.output(0));
    
      // Reduce away broadcasted leading dims.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/device_compilation_cache_test.cc

    TEST(DeviceCompilationCacheTest, StoreMultipleEntries) {
      auto cache = std::make_unique<Cache>();
    
      TF_ASSERT_OK_AND_ASSIGN(auto key1, BuildSampleSignature("foo"));
      TF_ASSERT_OK_AND_ASSIGN(auto key2, BuildSampleSignature("bar"));
    
      auto compilation_result1 = std::make_unique<XlaCompiler::CompilationResult>();
      auto compilation_result2 = std::make_unique<XlaCompiler::CompilationResult>();
      auto executable1 = std::make_unique<FakeExecutable>("foo_exe");
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 8.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.cc

    }
    
    void Exporter::UseOriginalFunctionNames(NodeDef& node_def) {
      if (!configs_.export_original_tf_func_name) return;
    
      auto& attrs = *node_def.mutable_attr();
    
      auto try_use_original_func_name = [this](std::string* name) {
        if (auto func = symbol_table_.lookup<FuncOp>(*name)) {
          if (auto original_func_name =
                  func->getAttrOfType<mlir::StringAttr>("tf._original_func_name")) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 35.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc

    }
    
    void Exporter::UseOriginalFunctionNames(NodeDef& node_def) {
      if (!configs_.export_original_tf_func_name) return;
    
      auto& attrs = *node_def.mutable_attr();
    
      auto try_use_original_func_name = [this](std::string* name) {
        if (auto func = symbol_table_.lookup<FuncOp>(*name)) {
          if (auto original_func_name =
                  func->getAttrOfType<mlir::StringAttr>("tf._original_func_name")) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 23:04:51 UTC 2024
    - 35.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

      for (auto& input : inputs) {
        auto input_op = input.get().getDefiningOp();
        if (!input_op || !IsSupportedHostInputOp(input_op)) return false;
      }
      for (auto entry : llvm::enumerate(inputs)) {
        Value input = entry.value().get();
        auto ranked_type = mlir::dyn_cast<RankedTensorType>(input.getType());
        if (!ranked_type) return false;
        auto input_shape = ranked_type.getShape();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
    - Viewed (0)
Back to top