Results 21 - 30 of 141 for "created" (0.23 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc

              !IsQintValueDefinedByIntToQintCast(op->getOperand(i))) {
            new_operands.push_back(rewriter.create<TF::CastOp>(
                op->getLoc(), orig_op_type, operands[i]));
          } else {
            new_operands.push_back(operands[i]);
          }
        }
    
        // Create a new UQ op.
        OperationState state(op->getLoc(), op->getName().getStringRef(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.6K bytes
    - Viewed (0)
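
    The snippet rebuilds the matched op by filling an OperationState with the
    original name, location, result types, and attributes but a new operand
    list. A minimal sketch of that general MLIR idiom, detached from the
    quantization specifics (RebuildWithOperands is a hypothetical helper, not
    from this file):

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/PatternMatch.h"

    using namespace mlir;

    // Recreate `op` with `new_operands`, keeping its name, result types, and
    // attributes, then splice the replacement into the IR.
    Operation* RebuildWithOperands(PatternRewriter& rewriter, Operation* op,
                                   ValueRange new_operands) {
      OperationState state(op->getLoc(), op->getName().getStringRef(),
                           new_operands, op->getResultTypes(), op->getAttrs());
      Operation* new_op = rewriter.create(state);
      rewriter.replaceOp(op, new_op->getResults());
      return new_op;
    }
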
  2. tensorflow/compiler/jit/xla_device.cc

    // Caches an XlaDeviceAllocator per <backend, device ordinal> pair. An
    // XlaDeviceAllocator is created on demand and is associated with an
    // XlaDevice. It outlives the device itself (for instance, the buffer
    // backing a tensor holds a pointer to the allocator for bookkeeping,
    // and this buffer can outlast the device).
    class XlaDeviceAllocatorState {
     public:
      // Creates or returns a cached XlaDeviceAllocator for a given
      // backend and device_ordinal.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
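
    The comment describes an on-demand, per-key cache whose entries outlive
    their requesters. A minimal sketch of that pattern with stand-in types
    (AllocatorCache and Allocator here are illustrative, not the TensorFlow
    classes):

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <utility>

    struct Allocator {};  // Stand-in for the real allocator type.

    class AllocatorCache {
     public:
      // Creates the allocator for <backend, device_ordinal> on first use and
      // returns the cached one afterwards.
      std::shared_ptr<Allocator> GetOrCreate(const std::string& backend,
                                             int device_ordinal) {
        std::lock_guard<std::mutex> lock(mu_);
        auto key = std::make_pair(backend, device_ordinal);
        auto it = cache_.find(key);
        if (it == cache_.end()) {
          it = cache_.emplace(key, std::make_shared<Allocator>()).first;
        }
        return it->second;  // Shared ownership lets the entry outlive a device.
      }

     private:
      std::mutex mu_;
      std::map<std::pair<std::string, int>, std::shared_ptr<Allocator>> cache_;
    };
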
  3. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

      llvm::SmallVector<mlir::TF::SplitOp, 4> split_ops_for_tiled_input;
      split_ops_for_tiled_input.reserve(
          input_sharding.tile_assignment_devices_size());
    
      // Creates a tree of split nodes for sharding tiled inputs. Split nodes
      // are created such that input data is sharded in row-major order.
      // Split nodes at the i-th depth from the original input node split the
      // input data along the i-th dimension.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
    - Viewed (0)
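
    The split-tree comment implies a simple invariant: splitting along
    dimension i at depth i enumerates the tiles in row-major order. A
    self-contained sketch of that recursion over plain index ranges (the MLIR
    SplitOp machinery is replaced by a visitor callback):

    #include <functional>
    #include <iostream>
    #include <vector>

    // Split dimension `depth`, then recurse; leaves are visited in row-major
    // order over the tile grid.
    void SplitTree(const std::vector<int>& tiles_per_dim, size_t depth,
                   std::vector<int>& coord,
                   const std::function<void(const std::vector<int>&)>& visit) {
      if (depth == tiles_per_dim.size()) {
        visit(coord);  // Leaf: one fully tiled shard.
        return;
      }
      for (int i = 0; i < tiles_per_dim[depth]; ++i) {
        coord.push_back(i);
        SplitTree(tiles_per_dim, depth + 1, coord, visit);
        coord.pop_back();
      }
    }

    int main() {
      std::vector<int> coord;
      // A 2x3 tiling visits tiles as (0,0) (0,1) (0,2) (1,0) (1,1) (1,2).
      SplitTree({2, 3}, 0, coord, [](const std::vector<int>& c) {
        for (int v : c) std::cout << v << ' ';
        std::cout << '\n';
      });
    }
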
  4. tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc

      builder.setInsertionPoint(&terminator);
      builder.create<tf_executor::YieldOp>(terminator.getLoc(),
                                           terminator.getOperands());
      terminator.erase();
    
      builder.setInsertionPoint(island_op);
      IRMapping mapping;
      for (int i : llvm::seq<int>(0, num_replicas)) {
        // Create new island for replica.
        auto replica = builder.create<tf_executor::IslandOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jul 24 21:01:40 UTC 2023
    - 16.9K bytes
    - Viewed (0)
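
    The loop above stamps out one island per replica, using a fresh IRMapping
    so each clone gets its own remapped values. A minimal sketch of that
    clone-per-replica idiom (CloneBodyPerReplica is a hypothetical helper, not
    from this pass):

    #include "mlir/IR/Block.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/IRMapping.h"
    #include "mlir/IR/Operation.h"

    using namespace mlir;

    // Clone every non-terminator op in `body` once per replica; the mapping
    // records cloned results so intra-body uses stay consistent per copy.
    void CloneBodyPerReplica(OpBuilder& builder, Block& body, int num_replicas) {
      for (int i = 0; i < num_replicas; ++i) {
        IRMapping mapping;  // Fresh per replica.
        for (Operation& op : body.without_terminator()) {
          builder.clone(op, mapping);
        }
      }
    }
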
  5. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

    namespace mlir {
    namespace TF {
    
    namespace {
    
    // Creates ConstOp for int32_t value.
    ConstOp createI32ConstOp(int32_t value, Location loc,
                             PatternRewriter* rewriter) {
      auto int_attr = IntegerAttr::get(rewriter->getIntegerType(32), value);
      return rewriter->create<ConstOp>(loc, int_attr);
    }
    
    // Creates ConstantOp for array of int32_t.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
    - Viewed (0)
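
    The snippet cuts off at the array variant. The standard MLIR idiom for it
    packs the values into a DenseIntElementsAttr over a 1-D i32 tensor; a
    sketch under that assumption (the name createI32ArrayConstOp and the exact
    signature are guesses modeled on the scalar helper above):

    // Assumes the same headers and mlir::TF namespaces as the snippet above;
    // DenseIntElementsAttr and RankedTensorType come from the MLIR core API.
    ConstOp createI32ArrayConstOp(llvm::ArrayRef<int32_t> values, Location loc,
                                  PatternRewriter* rewriter) {
      auto type = RankedTensorType::get({static_cast<int64_t>(values.size())},
                                        rewriter->getIntegerType(32));
      auto attr = DenseIntElementsAttr::get(type, values);
      return rewriter->create<ConstOp>(loc, attr);
    }
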
  6. tensorflow/compiler/jit/xla_tpu_device.cc

          absl::StrCat("device: ", DEVICE_TPU_SYSTEM, " device"));
      devices->push_back(std::make_unique<VirtualDevice>(options.env, attrs));
      VLOG(1) << "Created TPU_SYSTEM device. This host has " << device_count
              << " TPUs";
    
      return absl::OkStatus();
    }
    
    }  // namespace
    
    void RegisterTpuDeviceToDeviceCopy() {
      static auto* const register_tpu_tpu_copy = new CopyTensor::Registration(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
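
    RegisterTpuDeviceToDeviceCopy shows the function-local static registration
    idiom: the `new`-ed registration runs once, on first call, and is leaked
    deliberately so it lives for the whole process. A stand-alone sketch
    (Registry is a stand-in, not CopyTensor::Registration):

    #include <string>
    #include <vector>

    struct Registry {
      static std::vector<std::string>& Entries() {
        static std::vector<std::string> entries;  // Constructed on first use.
        return entries;
      }
      explicit Registry(const std::string& name) { Entries().push_back(name); }
    };

    void RegisterCopy() {
      // Runs exactly once, the first time RegisterCopy() is called; leaked on
      // purpose so the registration outlives all callers.
      static auto* const registration = new Registry("TPU->TPU copy");
      (void)registration;
    }
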
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

          return failure();
        }
    
        // Compute multiplier = scale / sqrt(variance + epsilon)
        Value multiplier = rewriter.create<mhlo::AddOp>(
            bn_op.getLoc(), bn_op.getVariance(), epsilon);
        multiplier = rewriter.create<mhlo::RsqrtOp>(bn_op.getLoc(), multiplier);
        multiplier = rewriter.create<mhlo::MulOp>(bn_op.getLoc(), multiplier,
                                                  bn_op.getScale());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
    - Viewed (0)
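
    The ops above assemble the standard batch-norm unfusing:
    multiplier = scale * rsqrt(variance + epsilon), after which the normalized
    result is (x - mean) * multiplier + offset. A scalar sketch of the same
    arithmetic:

    #include <cmath>
    #include <cstdio>

    double UnfusedBatchNorm(double x, double mean, double variance,
                            double scale, double offset, double epsilon) {
      const double multiplier = scale / std::sqrt(variance + epsilon);
      return (x - mean) * multiplier + offset;
    }

    int main() {
      // x = 2, mean = 1, variance = 4: (2 - 1) / sqrt(4 + eps) ~= 0.5.
      std::printf("%f\n", UnfusedBatchNorm(2.0, 1.0, 4.0, 1.0, 0.0, 1e-5));
    }
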
  8. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc

        if (tpu_replicated_input.getIsMirroredVariable()) {
          mirrored_variable_indices.push_back(pos_and_input.index());
        }
      }
    
      // Create replicate op.
      auto result_types = GetClusterResultTypes(cluster, partitioned_outputs);
      auto replicate_op = builder.create<mlir::tf_device::ReplicateOp>(
          cluster.getLoc(), num_replicas,
          llvm::SmallDenseMap<llvm::StringRef,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 39.3K bytes
    - Viewed (0)
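
    The `pos_and_input.index()` call above is the llvm::enumerate idiom:
    iterate with positions attached and collect the indices of matching
    elements. A minimal sketch (IndicesWhere is a hypothetical helper):

    #include <cstdint>
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"

    // Returns the positions of all true entries, in order.
    llvm::SmallVector<int64_t> IndicesWhere(llvm::ArrayRef<bool> is_mirrored) {
      llvm::SmallVector<int64_t> indices;
      for (const auto& pos_and_value : llvm::enumerate(is_mirrored)) {
        if (pos_and_value.value()) indices.push_back(pos_and_value.index());
      }
      return indices;
    }
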
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc

    // function, which might not exist in the case of multi-signature graphs. In
    // that case, this pass will create a new main function that calls the
    // signature functions.
    //
    // An already existing @main function will be renamed by attaching a numeric
    // suffix like `@main_0` to avoid conflict with the newly created main function.
    class InsertMainFunctionPass
        : public PassWrapper<InsertMainFunctionPass, OperationPass<ModuleOp>> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16.5K bytes
    - Viewed (0)
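
    The rename-with-suffix scheme in the comment amounts to probing `main_0`,
    `main_1`, ... until a free symbol name is found. A sketch of that probe
    (UniqueName and the `taken` set are illustrative; the pass itself works
    against the module's symbol table):

    #include <set>
    #include <string>

    // Returns `base` if free, otherwise the first free `base_<i>`.
    std::string UniqueName(const std::string& base,
                           const std::set<std::string>& taken) {
      if (taken.count(base) == 0) return base;
      for (int i = 0;; ++i) {
        std::string candidate = base + "_" + std::to_string(i);
        if (taken.count(candidate) == 0) return candidate;
      }
    }
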
  10. tensorflow/c/eager/c_api_distributed_test.cc

    // Test to ensure that a registered graph optimization pass is only executed
    // once (i.e., on the main function side) when running distributed functions.
    // This test creates a cluster with two workers, creates a variable on the
    // second worker, and runs a distributed function (VariableAddFunction)
    // whose ops span the local and remote workers. If the graph optimization
    // pass is executed
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 23.5K bytes
    - Viewed (0)
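
    The property under test (a pass fires once, on the main-function side, even
    when the function spans workers) boils down to a run counter plus an
    assertion. A toy stand-in, not the TensorFlow test harness:

    #include <cassert>

    static int pass_run_count = 0;

    void GraphOptimizationPass() { ++pass_run_count; }

    int main() {
      GraphOptimizationPass();      // The single expected run.
      assert(pass_run_count == 1);  // A remote re-run would break this.
    }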