fix: [crawler] timeout QUEUED captures

terrtia 2025-01-09 11:18:29 +01:00
parent 109ce56a4a
commit df161cfd64
GPG key ID: 1E1B1F50D84613D0


@@ -179,7 +179,14 @@ class Crawler(AbstractModule):
                     else:
                         capture.update(status)
                 elif status == crawlers.CaptureStatus.QUEUED:
-                    capture.update(status)
+                    capture_start = capture.get_start_time(r_str=False)
+                    if int(time.time()) - capture_start > 600:  # TODO ADD in new crawler config
+                        task = capture.get_task()
+                        task.reset()
+                        capture.delete()
+                        self.logger.warning(f'capture QUEUED Timeout, {task.uuid} Send back in queue')
+                    else:
+                        capture.update(status)
                     print(capture.uuid, crawlers.CaptureStatus(status).name, int(time.time()))
                 elif status == crawlers.CaptureStatus.ONGOING:
                     capture.update(status)
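
For context, here is a minimal standalone sketch of the requeue logic this commit introduces, assuming the capture/task interface shown in the diff (get_start_time(r_str=False), get_task(), reset(), delete(), update()); the names QUEUED_TIMEOUT and handle_queued_capture are hypothetical and not part of the AIL codebase:

import time

# Hypothetical constant mirroring the hard-coded 600 s threshold; the TODO in
# the diff suggests it should eventually move into the crawler config.
QUEUED_TIMEOUT = 600

def handle_queued_capture(capture, status, logger):
    # Sketch of the new QUEUED handling: a capture stuck in QUEUED for longer
    # than QUEUED_TIMEOUT has its task reset (sent back to the queue) and the
    # stale capture deleted; otherwise the stored status is simply refreshed.
    capture_start = capture.get_start_time(r_str=False)
    if int(time.time()) - capture_start > QUEUED_TIMEOUT:
        task = capture.get_task()
        task.reset()
        capture.delete()
        logger.warning(f'capture QUEUED Timeout, {task.uuid} Send back in queue')
    else:
        capture.update(status)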