Results 31 - 40 of 192 for stdx (0.14 sec)

  1. tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.cc

      return output_filename;
    }
    
    absl::StatusOr<std::string> PywrapSavedModelToStablehlo(
        absl::string_view input_path,
        const std::vector<std::string>& exported_model_signatures,
        const std::vector<std::string>& tag_names,
        absl::string_view input_arg_shapes_str) {
      mlir::DialectRegistry registry;
      RegisterAllTensorFlowDialects(registry);
      mlir::MLIRContext context(registry);
      context.loadAllAvailableDialects();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 22:58:42 UTC 2024
    - 5K bytes
    - Viewed (0)
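
    The excerpt above returns absl::StatusOr<std::string>. A minimal, self-contained sketch of how such a value is produced and consumed (ParseModule and its body are illustrative stand-ins, not code from this file):

    #include <iostream>
    #include <string>

    #include "absl/status/status.h"
    #include "absl/status/statusor.h"
    #include "absl/strings/string_view.h"

    // Made-up stand-in for a conversion entry point like the one above.
    absl::StatusOr<std::string> ParseModule(absl::string_view path) {
      if (path.empty()) {
        return absl::InvalidArgumentError("empty path");
      }
      return std::string("module @main {}");  // Placeholder payload.
    }

    int main() {
      absl::StatusOr<std::string> module = ParseModule("saved_model_dir");
      if (!module.ok()) {
        std::cerr << module.status() << "\n";  // Error path: report and bail.
        return 1;
      }
      std::cout << *module << "\n";  // Success path: use the contained string.
      return 0;
    }
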
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc

    // input/output names.
    void GetUniqueInputOutputNodeNames(ModuleOp module_op,
                                       std::vector<std::string>& input_name_vec,
                                       std::vector<std::string>& output_name_vec) {
      bool need_prefix_for_input_name = false;
      bool need_prefix_for_output_name = false;
      std::vector<StringRef> fn_input_name_vec, fn_output_name_vec;
      StringSet<> input_name_set, output_name_set;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16.5K bytes
    - Viewed (0)
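
    The function above deduplicates node names with an llvm::StringSet. The same idea in standard C++ only, as an illustrative sketch (UniqueNames is a made-up name, not from this file):

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Returns the input names with duplicates removed, preserving first-seen order.
    std::vector<std::string> UniqueNames(const std::vector<std::string>& names) {
      std::unordered_set<std::string> seen;
      std::vector<std::string> unique;
      unique.reserve(names.size());
      for (const std::string& name : names) {
        if (seen.insert(name).second) {  // insert() reports whether the name was new.
          unique.push_back(name);
        }
      }
      return unique;
    }
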
  3. tensorflow/compiler/jit/xla_device.cc

      XlaDeviceAllocatorState();
      ~XlaDeviceAllocatorState();
    
      mutex allocator_mutex_;  // Guards the singleton allocator state.
      std::unordered_map<std::pair<const xla::Backend*, int>,
                         std::unique_ptr<XlaDeviceAllocator>,
                         hash<std::pair<const xla::Backend*, int>>>
          allocators_ TF_GUARDED_BY(allocator_mutex_);
    
      XlaDeviceAllocatorState(const XlaDeviceAllocatorState&) = delete;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
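
    The class above guards a map keyed by a (backend pointer, ordinal) pair behind a mutex, with a custom hash for the pair. A self-contained sketch of that pattern, using placeholder types rather than the XLA classes:

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <mutex>
    #include <unordered_map>
    #include <utility>

    struct Backend {};    // Stand-in for xla::Backend.
    struct Allocator {};  // Stand-in for XlaDeviceAllocator.

    // std::unordered_map has no default hash for std::pair, so supply one.
    struct PairHash {
      size_t operator()(const std::pair<const Backend*, int>& key) const {
        return std::hash<const Backend*>()(key.first) ^
               (std::hash<int>()(key.second) << 1);
      }
    };

    class AllocatorState {
     public:
      Allocator* GetOrCreate(const Backend* backend, int device_ordinal) {
        std::lock_guard<std::mutex> lock(mu_);  // Guards the map, as in the excerpt.
        std::unique_ptr<Allocator>& slot = allocators_[{backend, device_ordinal}];
        if (!slot) slot = std::make_unique<Allocator>();
        return slot.get();
      }

     private:
      std::mutex mu_;
      std::unordered_map<std::pair<const Backend*, int>,
                         std::unique_ptr<Allocator>, PairHash>
          allocators_;
    };
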
  4. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc

    absl::Status CompileTensorflowGraphToHlo(
        const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
        const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
        const XlaShapeLayoutHelpers::ShapeDeterminationFns
            shape_determination_funcs,
        const std::vector<tensorflow::TensorShape>& arg_shapes,
        std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
        std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 14K bytes
    - Viewed (0)
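
    The signature above accepts the computation as a std::variant of two argument structs. A small sketch of dispatching on such a variant, with made-up stand-ins for the two alternatives:

    #include <iostream>
    #include <string>
    #include <variant>

    struct MlirArgs { std::string mlir_module; };        // Stand-in for MlirToHloArgs.
    struct FunctionArgs { std::string function_name; };  // Stand-in for FunctionToHloArgs.

    void Compile(const std::variant<MlirArgs, FunctionArgs>& computation) {
      // get_if returns a pointer only when that alternative is active.
      if (const auto* mlir = std::get_if<MlirArgs>(&computation)) {
        std::cout << "compiling MLIR module of " << mlir->mlir_module.size()
                  << " bytes\n";
      } else {
        const auto& fn = std::get<FunctionArgs>(computation);
        std::cout << "compiling function " << fn.function_name << "\n";
      }
    }
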
  5. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc

      auto client =
          xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform).value();
    
      std::vector<TensorShape> arg_shapes;
      TPUCompileMetadataProto metadata_proto;
      bool use_tuple_args = true;
      std::vector<ShardingAndIndex> arg_core_mapping;
      std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
      std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
    
      // This doesn't actually compile correctly.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 16.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc

      device_assignment.Serialize(&device_assignment_proto);
    
      return std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>(
          std::move(devices_and_hosts), std::move(device_assignment_proto));
    }
    
    mlir::LogicalResult GetTopology(mlir::tf_device::ClusterOp cluster,
                                    std::string& topology) {
      mlir::StringAttr topology_attr =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:10:40 UTC 2024
    - 32.8K bytes
    - Viewed (0)
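
    The excerpt above moves two locals into a returned std::pair. A trimmed-down sketch of the same pattern with simple stand-in types:

    #include <string>
    #include <utility>
    #include <vector>

    std::pair<std::vector<std::string>, std::string> BuildAssignment() {
      std::vector<std::string> devices = {"/device:TPU:0", "/device:TPU:1"};
      std::string serialized = "proto-bytes";
      // Moving avoids copying the vector and string into the returned pair.
      return std::pair<std::vector<std::string>, std::string>(
          std::move(devices), std::move(serialized));
    }

    // Callers can unpack the result with structured bindings:
    //   auto [devices, proto] = BuildAssignment();
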
  7. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

    static absl::StatusOr<std::vector<int>> RewriteWithArgs(
        mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args) {
      mlir::func::FuncOp main_fn =
          module_op.lookupSymbol<mlir::func::FuncOp>("main");
      std::vector<int> params;
    
      bool has_resource_args = false;
      auto builder = mlir::OpBuilder(main_fn.getBody());
      std::vector<int> args_to_erase;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
    - Viewed (0)
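
    The excerpt above collects argument indices into args_to_erase. One common way to consume such a list (assumed here, it is not shown in this excerpt) is to erase from the highest index down so earlier indices stay valid:

    #include <algorithm>
    #include <string>
    #include <vector>

    void EraseByIndex(std::vector<std::string>& args,
                      std::vector<int> args_to_erase) {
      // Sort descending, then erase back-to-front so indices are not shifted.
      std::sort(args_to_erase.rbegin(), args_to_erase.rend());
      for (int index : args_to_erase) {
        args.erase(args.begin() + index);
      }
    }
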
  8. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc

        return failure();
      }
    
      // Compute slices for each batch in the LHS and RHS.
      std::vector<Value> sliced_lhs =
          sliceInput(input_lhs, bcast.x_batch_size(), loc, rewriter);
      std::vector<Value> sliced_rhs =
          sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter);
    
      // Compute (single batch) MatMul for each output batch.
      std::vector<Value> matmuls;
      matmuls.reserve(bcast.output_batch_size());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
    - Viewed (0)
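
    The pass above unrolls a batch MatMul into one MatMul per batch and reserves the output vector up front. A plain C++ sketch of that unrolling idea, not the MLIR pass itself (matrices here are row-major float vectors):

    #include <cstddef>
    #include <utility>
    #include <vector>

    std::vector<std::vector<float>> UnrolledBatchMatMul(
        const std::vector<std::vector<float>>& lhs_batches,  // each m x k
        const std::vector<std::vector<float>>& rhs_batches,  // each k x n
        int m, int k, int n) {
      std::vector<std::vector<float>> matmuls;
      matmuls.reserve(lhs_batches.size());  // One result per batch, as above.
      for (size_t b = 0; b < lhs_batches.size(); ++b) {
        std::vector<float> out(m * n, 0.0f);
        // Single-batch MatMul for batch b.
        for (int i = 0; i < m; ++i)
          for (int j = 0; j < n; ++j)
            for (int p = 0; p < k; ++p)
              out[i * n + j] +=
                  lhs_batches[b][i * k + p] * rhs_batches[b][p * n + j];
        matmuls.push_back(std::move(out));
      }
      return matmuls;
    }
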
  9. tensorflow/c/kernels_experimental.cc

      TF_VariableInputLockHolder(
          std::vector<tensorflow::Var*> vars,
          std::unique_ptr<std::vector<tensorflow::mutex_lock>> locks,
          std::unique_ptr<std::vector<tensorflow::tf_shared_lock>> shared_locks)
          : vars(std::move(vars)),
            locks(std::move(locks)),
            shared_locks(std::move(shared_locks)) {}
    
      std::vector<tensorflow::Var*> vars;
      std::unique_ptr<std::vector<tensorflow::mutex_lock>> locks;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 23 06:12:29 UTC 2024
    - 30.9K bytes
    - Viewed (0)
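
    The struct above takes ownership of pre-acquired locks so they are all released together. A self-contained sketch of that lock-holder idea, using std::unique_lock as a stand-in for tensorflow::mutex_lock:

    #include <memory>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct Var { std::mutex mu; int value = 0; };  // Stand-in for tensorflow::Var.

    class VariableLockHolder {
     public:
      VariableLockHolder(std::vector<Var*> vars,
                         std::vector<std::unique_lock<std::mutex>> locks)
          : vars_(std::move(vars)), locks_(std::move(locks)) {}

     private:
      std::vector<Var*> vars_;
      std::vector<std::unique_lock<std::mutex>> locks_;  // Released on destruction.
    };

    // Acquire every variable's lock, then hand them all to one holder.
    VariableLockHolder LockAll(std::vector<Var*> vars) {
      std::vector<std::unique_lock<std::mutex>> locks;
      locks.reserve(vars.size());
      for (Var* v : vars) locks.emplace_back(v->mu);
      return VariableLockHolder(std::move(vars), std::move(locks));
    }
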
  10. tensorflow/c/experimental/saved_model/internal/saved_model_api_test.cc

      const TF_Shape* shape_out = TF_TensorSpecShape(tensor_spec_out);
    
      // Output "output_0" is a scalar, float32 tensor
      EXPECT_EQ("output_0", std::string(TF_SignatureDefParamName(param_out)));
      EXPECT_EQ(TF_FLOAT, TF_TensorSpecDataType(tensor_spec_out));
      EXPECT_EQ(0, TF_ShapeDims(shape_out));
    
      std::vector<TFE_TensorHandle*> compute_fn_inputs;
      TFE_TensorHandle* input_a = TestScalarTensorHandle(ctx, 2.0f);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 23 08:08:45 UTC 2024
    - 21.3K bytes
    - Viewed (0)
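
    The test above checks a signature's scalar float output with GoogleTest EXPECT_EQ assertions. A minimal sketch of that assertion style, with a made-up stand-in for the C-API tensor spec:

    #include <string>

    #include <gtest/gtest.h>

    struct ScalarSpec {
      std::string name = "output_0";
      int dtype = 1;   // Pretend 1 means float32.
      int ndims = 0;   // Scalars have zero dimensions.
    };

    TEST(SavedModelSpec, OutputIsFloatScalar) {
      ScalarSpec spec;
      EXPECT_EQ("output_0", spec.name);
      EXPECT_EQ(1, spec.dtype);
      EXPECT_EQ(0, spec.ndims);
    }
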