processing.py

# coding: utf-8
import typing

from multiprocessing import Process
from multiprocessing.connection import Connection
from multiprocessing.connection import Pipe

from synergine2.base import BaseObject
from synergine2.config import Config
from synergine2.share import SharedDataManager

STOP = '__STOP__'

# global shared manager
shared_data = SharedDataManager()


# TODO: se jobs
class Job(object):
    pass


class Worker(object):
    def __init__(
        self,
        config: Config,
        real_job: typing.Callable[..., typing.Any],
    ) -> None:
        self.config = config
        # One pipe pair per direction: the worker process writes results into
        # local_write_pipe (read by the parent via local_read_pipe), and the
        # parent writes messages into process_write_pipe (read by the worker
        # via process_read_pipe).
        local_read_pipe, local_write_pipe = Pipe(duplex=False)
        process_read_pipe, process_write_pipe = Pipe(duplex=False)
        self.local_read_pipe = local_read_pipe  # type: Connection
        self.local_write_pipe = local_write_pipe  # type: Connection
        self.process_read_pipe = process_read_pipe  # type: Connection
        self.process_write_pipe = process_write_pipe  # type: Connection
        self.real_job = real_job
        self.process = Process(
            target=self.work,
            args=(
                self.local_write_pipe,
                self.process_read_pipe,
            )
        )
        self.db = None  # type: RedisDatabase
        self.process.start()

    def work(self, *args, **kwargs):
        # Consume messages until the STOP sentinel is received; each message
        # is handed to real_job and the result is sent back to the parent.
        while True:
            message = self.process_read_pipe.recv()
            if message == STOP:
                return
            result = self.real_job(message)
            self.local_write_pipe.send(result)


class ProcessManager(BaseObject):
    def __init__(
        self,
        config: Config,
        process_count: int,
        job: typing.Callable[..., typing.Any],
    ) -> None:
        self.config = config
        self._process_count = process_count
        self.workers = []  # type: typing.List[Worker]
        self.start_workers(process_count, job)

    def start_workers(self, worker_count: int, job: typing.Callable[..., typing.Any]) -> None:
        assert not self.workers
        for i in range(worker_count):
            self.workers.append(Worker(self.config, job))

    def make_them_work(self, message: typing.Any) -> typing.List[typing.Any]:
        # Broadcast the message to every worker, then collect exactly one
        # response per worker, in worker start order.
        responses = []
        for worker in self.workers:
            worker.process_write_pipe.send(message)
        for worker in self.workers:
            responses.append(worker.local_read_pipe.recv())
        return responses

    def terminate(self) -> None:
        # Ask every worker to stop, then wait for its process to exit.
        for worker in self.workers:
            worker.process_write_pipe.send(STOP)
        for worker in self.workers:
            worker.process.join()

    #
    # def chunk_and_execute_jobs(self, data: list, job_maker: types.FunctionType) -> list:
    #     chunks = self._chunk_manager.make_chunks(data)
    #
    #     if self._process_count > 1:
    #         print('USE POOL')
    #         results = self.pool.starmap(job_maker, [(chunk, i, self._process_count) for i, chunk in enumerate(chunks)])
    #     else:
    #         print('USE MONO')
    #         results = [job_maker(data, 0, 1)]
    #
    #     return results
    #
    # def execute_jobs(self, data: object, job_maker: types.FunctionType) -> list:
    #     # TODO: Is there a reason to make multiprocessing here ? data is not chunked ...
    #     if self._process_count > 1:
    #         results = self.pool.starmap(job_maker, [(data, i, self._process_count) for i in range(self._process_count)])
    #     else:
    #         results = [job_maker(data, 0, 1)]
    #
    #     return results
    #
    # def __del__(self):
    #     # TODO: DEV
    #     return
    #     if self.pool:
    #         self.pool.terminate()