feat: indie status page MVP -- FastAPI + SQLite
- 8 DB models (services, incidents, monitors, subscribers, etc.)
- Full CRUD API for services, incidents, monitors
- Public status page with live data
- Incident detail page with timeline
- API key authentication
- Uptime monitoring scheduler (see the wiring sketch below)
- 13 tests passing
- TECHNICAL_DESIGN.md with full spec
This commit is contained in: commit 902133edd3
4655 changed files with 1342691 additions and 0 deletions
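Nearly all of those 4655 files are the vendored venv/ tree (the APScheduler sources below are library code, not application code), so the status-page logic itself accounts for only a small fraction of the additions. For context, a minimal sketch of how the uptime monitoring scheduler described above could be wired into the FastAPI app; the run_uptime_checks helper is an assumption, not code from this commit -- only the FastAPI and APScheduler APIs shown are real:

# Hypothetical wiring sketch; only the APScheduler/FastAPI APIs are real.
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI

scheduler = AsyncIOScheduler()


async def run_uptime_checks():
    """Poll every active monitor and record the result (assumed helper)."""
    ...


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Check all monitors once a minute; coalesce runs missed during downtime.
    scheduler.add_job(run_uptime_checks, "interval", minutes=1, coalesce=True)
    scheduler.start()
    yield
    scheduler.shutdown()


app = FastAPI(lifespan=lifespan)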
11  venv/lib/python3.11/site-packages/apscheduler/__init__.py  Normal file
@@ -0,0 +1,11 @@
import importlib.metadata as importlib_metadata
import sys

try:
    release = importlib_metadata.version("APScheduler").split("-")[0]
except importlib_metadata.PackageNotFoundError:
    release = "3.5.0"

version_info = tuple(int(x) if x.isdigit() else x for x in release.split("."))
version = __version__ = ".".join(str(x) for x in version_info[:3])
del sys, importlib_metadata
134  venv/lib/python3.11/site-packages/apscheduler/events.py  Normal file
@@ -0,0 +1,134 @@
__all__ = (
    "EVENT_ALL",
    "EVENT_ALL_JOBS_REMOVED",
    "EVENT_EXECUTOR_ADDED",
    "EVENT_EXECUTOR_REMOVED",
    "EVENT_JOBSTORE_ADDED",
    "EVENT_JOBSTORE_REMOVED",
    "EVENT_JOB_ADDED",
    "EVENT_JOB_ERROR",
    "EVENT_JOB_EXECUTED",
    "EVENT_JOB_MAX_INSTANCES",
    "EVENT_JOB_MISSED",
    "EVENT_JOB_MODIFIED",
    "EVENT_JOB_REMOVED",
    "EVENT_JOB_SUBMITTED",
    "EVENT_SCHEDULER_PAUSED",
    "EVENT_SCHEDULER_RESUMED",
    "EVENT_SCHEDULER_SHUTDOWN",
    "EVENT_SCHEDULER_STARTED",
    "JobEvent",
    "JobExecutionEvent",
    "JobSubmissionEvent",
    "SchedulerEvent",
)


EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2**0
EVENT_SCHEDULER_SHUTDOWN = 2**1
EVENT_SCHEDULER_PAUSED = 2**2
EVENT_SCHEDULER_RESUMED = 2**3
EVENT_EXECUTOR_ADDED = 2**4
EVENT_EXECUTOR_REMOVED = 2**5
EVENT_JOBSTORE_ADDED = 2**6
EVENT_JOBSTORE_REMOVED = 2**7
EVENT_ALL_JOBS_REMOVED = 2**8
EVENT_JOB_ADDED = 2**9
EVENT_JOB_REMOVED = 2**10
EVENT_JOB_MODIFIED = 2**11
EVENT_JOB_EXECUTED = 2**12
EVENT_JOB_ERROR = 2**13
EVENT_JOB_MISSED = 2**14
EVENT_JOB_SUBMITTED = 2**15
EVENT_JOB_MAX_INSTANCES = 2**16
EVENT_ALL = (
    EVENT_SCHEDULER_STARTED
    | EVENT_SCHEDULER_SHUTDOWN
    | EVENT_SCHEDULER_PAUSED
    | EVENT_SCHEDULER_RESUMED
    | EVENT_EXECUTOR_ADDED
    | EVENT_EXECUTOR_REMOVED
    | EVENT_JOBSTORE_ADDED
    | EVENT_JOBSTORE_REMOVED
    | EVENT_ALL_JOBS_REMOVED
    | EVENT_JOB_ADDED
    | EVENT_JOB_REMOVED
    | EVENT_JOB_MODIFIED
    | EVENT_JOB_EXECUTED
    | EVENT_JOB_ERROR
    | EVENT_JOB_MISSED
    | EVENT_JOB_SUBMITTED
    | EVENT_JOB_MAX_INSTANCES
)


class SchedulerEvent:
    """
    An event that concerns the scheduler itself.

    :ivar code: the type code of this event
    :ivar alias: alias of the job store or executor that was added or removed (if applicable)
    """

    def __init__(self, code, alias=None):
        super().__init__()
        self.code = code
        self.alias = alias

    def __repr__(self):
        return f"<{self.__class__.__name__} (code={self.code})>"


class JobEvent(SchedulerEvent):
    """
    An event that concerns a job.

    :ivar code: the type code of this event
    :ivar job_id: identifier of the job in question
    :ivar jobstore: alias of the job store containing the job in question
    """

    def __init__(self, code, job_id, jobstore):
        super().__init__(code)
        self.code = code
        self.job_id = job_id
        self.jobstore = jobstore


class JobSubmissionEvent(JobEvent):
    """
    An event that concerns the submission of a job to its executor.

    :ivar scheduled_run_times: a list of datetimes when the job was intended to run
    """

    def __init__(self, code, job_id, jobstore, scheduled_run_times):
        super().__init__(code, job_id, jobstore)
        self.scheduled_run_times = scheduled_run_times


class JobExecutionEvent(JobEvent):
    """
    An event that concerns the running of a job within its executor.

    :ivar scheduled_run_time: the time when the job was scheduled to be run
    :ivar retval: the return value of the successfully executed job
    :ivar exception: the exception raised by the job
    :ivar traceback: a formatted traceback for the exception
    """

    def __init__(
        self,
        code,
        job_id,
        jobstore,
        scheduled_run_time,
        retval=None,
        exception=None,
        traceback=None,
    ):
        super().__init__(code, job_id, jobstore)
        self.scheduled_run_time = scheduled_run_time
        self.retval = retval
        self.exception = exception
        self.traceback = traceback
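These event codes are bit flags, so they can be OR-ed together when registering a listener. A small usage sketch against the standard APScheduler API (not part of this diff):

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED, EVENT_JOB_MISSED
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()


def on_job_event(event):
    # JobExecutionEvent carries job_id plus either retval or exception/traceback.
    if event.code == EVENT_JOB_ERROR:
        print(f"job {event.job_id} failed: {event.exception}")
    else:
        print(f"job {event.job_id} ran")


# The mask argument is a bitwise OR of the constants above.
scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)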
52  venv/lib/python3.11/site-packages/apscheduler/executors/asyncio.py  Normal file
@@ -0,0 +1,52 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial


class AsyncIOExecutor(BaseExecutor):
    """
    Runs jobs in the default executor of the event loop.

    If the job function is a native coroutine function, it is scheduled to be run directly in the
    event loop as soon as possible. All other functions are run in the event loop's default
    executor which is usually a thread pool.

    Plugin alias: ``asyncio``
    """

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._eventloop = scheduler._eventloop
        self._pending_futures = set()

    def shutdown(self, wait=True):
        # There is no way to honor wait=True without converting this method into a coroutine method
        for f in self._pending_futures:
            if not f.done():
                f.cancel()

        self._pending_futures.clear()

    def _do_submit_job(self, job, run_times):
        def callback(f):
            self._pending_futures.discard(f)
            try:
                events = f.result()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        if iscoroutinefunction_partial(job.func):
            coro = run_coroutine_job(
                job, job._jobstore_alias, run_times, self._logger.name
            )
            f = self._eventloop.create_task(coro)
        else:
            f = self._eventloop.run_in_executor(
                None, run_job, job, job._jobstore_alias, run_times, self._logger.name
            )

        f.add_done_callback(callback)
        self._pending_futures.add(f)
205  venv/lib/python3.11/site-packages/apscheduler/executors/base.py  Normal file
@@ -0,0 +1,205 @@
import logging
import sys
import traceback
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from traceback import format_tb

from apscheduler.events import (
    EVENT_JOB_ERROR,
    EVENT_JOB_EXECUTED,
    EVENT_JOB_MISSED,
    JobExecutionEvent,
)


class MaxInstancesReachedError(Exception):
    def __init__(self, job):
        super().__init__(
            f'Job "{job.id}" has already reached its maximum number of instances '
            f"({job.max_instances})"
        )


class BaseExecutor(metaclass=ABCMeta):
    """Abstract base class that defines the interface that every executor must implement."""

    _scheduler = None
    _lock = None
    _logger = logging.getLogger("apscheduler.executors")

    def __init__(self):
        super().__init__()
        self._instances = defaultdict(lambda: 0)

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the executor is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this executor
        :param str|unicode alias: alias of this executor as it was assigned to the scheduler

        """
        self._scheduler = scheduler
        self._lock = scheduler._create_lock()
        self._logger = logging.getLogger(f"apscheduler.executors.{alias}")

    def shutdown(self, wait=True):
        """
        Shuts down this executor.

        :param bool wait: ``True`` to wait until all submitted jobs
            have been executed
        """

    def submit_job(self, job, run_times):
        """
        Submits job for execution.

        :param Job job: job to execute
        :param list[datetime] run_times: list of datetimes specifying
            when the job should have been run
        :raises MaxInstancesReachedError: if the maximum number of
            allowed instances for this job has been reached

        """
        assert self._lock is not None, "This executor has not been started yet"
        with self._lock:
            if self._instances[job.id] >= job.max_instances:
                raise MaxInstancesReachedError(job)

            self._do_submit_job(job, run_times)
            self._instances[job.id] += 1

    @abstractmethod
    def _do_submit_job(self, job, run_times):
        """Performs the actual task of scheduling `run_job` to be called."""

    def _run_job_success(self, job_id, events):
        """
        Called by the executor with the list of generated events when :func:`run_job` has been
        successfully called.

        """
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        for event in events:
            self._scheduler._dispatch_event(event)

    def _run_job_error(self, job_id, exc, traceback=None):
        """Called by the executor with the exception if there is an error calling `run_job`."""
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        exc_info = (exc.__class__, exc, traceback)
        self._logger.error("Error running job %s", job_id, exc_info=exc_info)


def run_job(job, jobstore_alias, run_times, logger_name):
    """
    Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
    scheduler.

    """
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle
        # possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)

            # This is to prevent cyclic references that would lead to memory leaks
            traceback.clear_frames(tb)
            del tb
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events


async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
    """Coroutine version of run_job()."""
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = await job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)
            traceback.clear_frames(tb)
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events
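run_job() compares each scheduled run time against misfire_grace_time, and submit_job() enforces max_instances; both are per-job settings. A usage sketch with the standard add_job API (not part of this diff; poll_endpoint is a placeholder):

from apscheduler.schedulers.background import BackgroundScheduler


def poll_endpoint():
    ...


scheduler = BackgroundScheduler()
# Skip a run (emitting EVENT_JOB_MISSED) if it starts more than 30s late,
# and never let two polls of the same job overlap.
scheduler.add_job(
    poll_endpoint,
    "interval",
    minutes=1,
    misfire_grace_time=30,
    max_instances=1,
    coalesce=True,
)
scheduler.start()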
20  venv/lib/python3.11/site-packages/apscheduler/executors/debug.py  Normal file
@@ -0,0 +1,20 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_job


class DebugExecutor(BaseExecutor):
    """
    A special executor that executes the target callable directly instead of deferring it to a
    thread or process.

    Plugin alias: ``debug``
    """

    def _do_submit_job(self, job, run_times):
        try:
            events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except BaseException:
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, events)
29  venv/lib/python3.11/site-packages/apscheduler/executors/gevent.py  Normal file
@@ -0,0 +1,29 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_job

try:
    import gevent
except ImportError as exc:  # pragma: nocover
    raise ImportError("GeventExecutor requires gevent installed") from exc


class GeventExecutor(BaseExecutor):
    """
    Runs jobs as greenlets.

    Plugin alias: ``gevent``
    """

    def _do_submit_job(self, job, run_times):
        def callback(greenlet):
            try:
                events = greenlet.get()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        gevent.spawn(
            run_job, job, job._jobstore_alias, run_times, self._logger.name
        ).link(callback)
82  venv/lib/python3.11/site-packages/apscheduler/executors/pool.py  Normal file
@@ -0,0 +1,82 @@
import concurrent.futures
import multiprocessing
from abc import abstractmethod
from concurrent.futures.process import BrokenProcessPool

from apscheduler.executors.base import BaseExecutor, run_job


class BasePoolExecutor(BaseExecutor):
    @abstractmethod
    def __init__(self, pool):
        super().__init__()
        self._pool = pool

    def _do_submit_job(self, job, run_times):
        def callback(f):
            exc, tb = (
                f.exception_info()
                if hasattr(f, "exception_info")
                else (f.exception(), getattr(f.exception(), "__traceback__", None))
            )
            if exc:
                self._run_job_error(job.id, exc, tb)
            else:
                self._run_job_success(job.id, f.result())

        f = self._pool.submit(
            run_job, job, job._jobstore_alias, run_times, self._logger.name
        )
        f.add_done_callback(callback)

    def shutdown(self, wait=True):
        self._pool.shutdown(wait)


class ThreadPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures thread pool.

    Plugin alias: ``threadpool``

    :param max_workers: the maximum number of spawned threads.
    :param pool_kwargs: dict of keyword arguments to pass to the underlying
        ThreadPoolExecutor constructor
    """

    def __init__(self, max_workers=10, pool_kwargs=None):
        pool_kwargs = pool_kwargs or {}
        pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
        super().__init__(pool)


class ProcessPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures process pool.

    Plugin alias: ``processpool``

    :param max_workers: the maximum number of spawned processes.
    :param pool_kwargs: dict of keyword arguments to pass to the underlying
        ProcessPoolExecutor constructor
    """

    def __init__(self, max_workers=10, pool_kwargs=None):
        self.pool_kwargs = pool_kwargs or {}
        self.pool_kwargs.setdefault("mp_context", multiprocessing.get_context("spawn"))
        pool = concurrent.futures.ProcessPoolExecutor(
            int(max_workers), **self.pool_kwargs
        )
        super().__init__(pool)

    def _do_submit_job(self, job, run_times):
        try:
            super()._do_submit_job(job, run_times)
        except BrokenProcessPool:
            self._logger.warning(
                "Process pool is broken; replacing pool with a fresh instance"
            )
            self._pool = self._pool.__class__(
                self._pool._max_workers, **self.pool_kwargs
            )
            super()._do_submit_job(job, run_times)
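Executors are normally registered when constructing the scheduler and selected per job by alias. A sketch with the standard configuration API (not part of this diff):

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

executors = {
    "default": ThreadPoolExecutor(max_workers=20),
    "processpool": ProcessPoolExecutor(max_workers=4),
}
scheduler = BackgroundScheduler(executors=executors)

# Route a CPU-bound job to the process pool by alias; sum/range are picklable,
# which the spawn-based process pool requires.
scheduler.add_job(sum, "interval", args=[range(10_000_000)], hours=1, executor="processpool")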
49  venv/lib/python3.11/site-packages/apscheduler/executors/tornado.py  Normal file
@@ -0,0 +1,49 @@
import sys
from concurrent.futures import ThreadPoolExecutor

from tornado.gen import convert_yielded

from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial


class TornadoExecutor(BaseExecutor):
    """
    Runs jobs either in a thread pool or directly on the I/O loop.

    If the job function is a native coroutine function, it is scheduled to be run directly in the
    I/O loop as soon as possible. All other functions are run in a thread pool.

    Plugin alias: ``tornado``

    :param int max_workers: maximum number of worker threads in the thread pool
    """

    def __init__(self, max_workers=10):
        super().__init__()
        self.executor = ThreadPoolExecutor(max_workers)

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._ioloop = scheduler._ioloop

    def _do_submit_job(self, job, run_times):
        def callback(f):
            try:
                events = f.result()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        if iscoroutinefunction_partial(job.func):
            f = run_coroutine_job(
                job, job._jobstore_alias, run_times, self._logger.name
            )
        else:
            f = self.executor.submit(
                run_job, job, job._jobstore_alias, run_times, self._logger.name
            )

        f = convert_yielded(f)
        f.add_done_callback(callback)
24  venv/lib/python3.11/site-packages/apscheduler/executors/twisted.py  Normal file
@@ -0,0 +1,24 @@
from apscheduler.executors.base import BaseExecutor, run_job


class TwistedExecutor(BaseExecutor):
    """
    Runs jobs in the reactor's thread pool.

    Plugin alias: ``twisted``
    """

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._reactor = scheduler._reactor

    def _do_submit_job(self, job, run_times):
        def callback(success, result):
            if success:
                self._run_job_success(job.id, result)
            else:
                self._run_job_error(job.id, result.value, result.tb)

        self._reactor.getThreadPool().callInThreadWithCallback(
            callback, run_job, job, job._jobstore_alias, run_times, self._logger.name
        )
333  venv/lib/python3.11/site-packages/apscheduler/job.py  Normal file
@@ -0,0 +1,333 @@
from collections.abc import Iterable, Mapping
from datetime import timezone
from inspect import isclass, ismethod
from uuid import uuid4

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
    check_callable_args,
    convert_to_datetime,
    datetime_repr,
    get_callable_name,
    obj_to_ref,
    ref_to_obj,
)

UTC = timezone.utc


class Job:
    """
    Contains the options given when scheduling callables, along with the job's current schedule
    and other state. This class should never be instantiated by the user.

    :var str id: the unique identifier of this job
    :var str name: the description of this job
    :var func: the callable to execute
    :var tuple|list args: positional arguments to the callable
    :var dict kwargs: keyword arguments to the callable
    :var bool coalesce: whether to only run the job once when several run times are due
    :var trigger: the trigger object that controls the schedule of this job
    :var str executor: the name of the executor that will run this job
    :var int misfire_grace_time: the amount of time (in seconds) by which this job's execution is
        allowed to be late (``None`` means "allow the job to run no matter how late it is")
    :var int max_instances: the maximum number of concurrently executing instances allowed for this
        job
    :var datetime.datetime next_run_time: the next scheduled run time of this job

    .. note::
        The ``misfire_grace_time`` has some non-obvious effects on job execution. See the
        :ref:`missed-job-executions` section in the documentation for an in-depth explanation.
    """

    __slots__ = (
        "__weakref__",
        "_jobstore_alias",
        "_scheduler",
        "args",
        "coalesce",
        "executor",
        "func",
        "func_ref",
        "id",
        "kwargs",
        "max_instances",
        "misfire_grace_time",
        "name",
        "next_run_time",
        "trigger",
    )

    def __init__(self, scheduler, id=None, **kwargs):
        super().__init__()
        self._scheduler = scheduler
        self._jobstore_alias = None
        self._modify(id=id or uuid4().hex, **kwargs)

    def modify(self, **changes):
        """
        Makes the given changes to this job and saves it in the associated job store.

        Accepted keyword arguments are the same as the variables on this class.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`

        :return Job: this job instance

        """
        self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)
        return self

    def reschedule(self, trigger, **trigger_args):
        """
        Shortcut for switching the trigger on this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`

        :return Job: this job instance

        """
        self._scheduler.reschedule_job(
            self.id, self._jobstore_alias, trigger, **trigger_args
        )
        return self

    def pause(self):
        """
        Temporarily suspend the execution of this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`

        :return Job: this job instance

        """
        self._scheduler.pause_job(self.id, self._jobstore_alias)
        return self

    def resume(self):
        """
        Resume the schedule of this job if previously paused.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`

        :return Job: this job instance

        """
        self._scheduler.resume_job(self.id, self._jobstore_alias)
        return self

    def remove(self):
        """
        Unschedules this job and removes it from its associated job store.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`

        """
        self._scheduler.remove_job(self.id, self._jobstore_alias)

    @property
    def pending(self):
        """
        Returns ``True`` if the referenced job is still waiting to be added to its designated job
        store.

        """
        return self._jobstore_alias is None

    #
    # Private API
    #

    def _get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).

        :type now: datetime.datetime
        :rtype: list[datetime.datetime]

        """
        run_times = []
        next_run_time = self.next_run_time
        while next_run_time and next_run_time.astimezone(UTC) <= now.astimezone(UTC):
            run_times.append(next_run_time)
            next_run_time = self.trigger.get_next_fire_time(next_run_time, now)

        return run_times

    def _modify(self, **changes):
        """
        Validates the changes to the Job and makes the modifications if and only if all of them
        validate.

        """
        approved = {}

        if "id" in changes:
            value = changes.pop("id")
            if not isinstance(value, str):
                raise TypeError("id must be a nonempty string")
            if hasattr(self, "id"):
                raise ValueError("The job ID may not be changed")
            approved["id"] = value

        if "func" in changes or "args" in changes or "kwargs" in changes:
            func = changes.pop("func") if "func" in changes else self.func
            args = changes.pop("args") if "args" in changes else self.args
            kwargs = changes.pop("kwargs") if "kwargs" in changes else self.kwargs

            if isinstance(func, str):
                func_ref = func
                func = ref_to_obj(func)
            elif callable(func):
                try:
                    func_ref = obj_to_ref(func)
                except ValueError:
                    # If this happens, this Job won't be serializable
                    func_ref = None
            else:
                raise TypeError("func must be a callable or a textual reference to one")

            if not hasattr(self, "name") and changes.get("name", None) is None:
                changes["name"] = get_callable_name(func)

            if isinstance(args, str) or not isinstance(args, Iterable):
                raise TypeError("args must be a non-string iterable")
            if isinstance(kwargs, str) or not isinstance(kwargs, Mapping):
                raise TypeError("kwargs must be a dict-like object")

            check_callable_args(func, args, kwargs)

            approved["func"] = func
            approved["func_ref"] = func_ref
            approved["args"] = args
            approved["kwargs"] = kwargs

        if "name" in changes:
            value = changes.pop("name")
            if not value or not isinstance(value, str):
                raise TypeError("name must be a nonempty string")
            approved["name"] = value

        if "misfire_grace_time" in changes:
            value = changes.pop("misfire_grace_time")
            if value is not None and (not isinstance(value, int) or value <= 0):
                raise TypeError(
                    "misfire_grace_time must be either None or a positive integer"
                )
            approved["misfire_grace_time"] = value

        if "coalesce" in changes:
            value = bool(changes.pop("coalesce"))
            approved["coalesce"] = value

        if "max_instances" in changes:
            value = changes.pop("max_instances")
            if not isinstance(value, int) or value <= 0:
                raise TypeError("max_instances must be a positive integer")
            approved["max_instances"] = value

        if "trigger" in changes:
            trigger = changes.pop("trigger")
            if not isinstance(trigger, BaseTrigger):
                raise TypeError(
                    f"Expected a trigger instance, got {trigger.__class__.__name__} instead"
                )

            approved["trigger"] = trigger

        if "executor" in changes:
            value = changes.pop("executor")
            if not isinstance(value, str):
                raise TypeError("executor must be a string")
            approved["executor"] = value

        if "next_run_time" in changes:
            value = changes.pop("next_run_time")
            approved["next_run_time"] = convert_to_datetime(
                value, self._scheduler.timezone, "next_run_time"
            )

        if changes:
            raise AttributeError(
                "The following are not modifiable attributes of Job: {}".format(
                    ", ".join(changes)
                )
            )

        for key, value in approved.items():
            setattr(self, key, value)

    def __getstate__(self):
        # Don't allow this Job to be serialized if the function reference could not be determined
        if not self.func_ref:
            raise ValueError(
                f"This Job cannot be serialized since the reference to its callable ({self.func!r}) could not "
                "be determined. Consider giving a textual reference (module:function name) "
                "instead."
            )

        # Instance methods cannot survive serialization as-is, so store the "self" argument
        # explicitly
        func = self.func
        if (
            ismethod(func)
            and not isclass(func.__self__)
            and obj_to_ref(func) == self.func_ref
        ):
            args = (func.__self__,) + tuple(self.args)
        else:
            args = self.args

        return {
            "version": 1,
            "id": self.id,
            "func": self.func_ref,
            "trigger": self.trigger,
            "executor": self.executor,
            "args": args,
            "kwargs": self.kwargs,
            "name": self.name,
            "misfire_grace_time": self.misfire_grace_time,
            "coalesce": self.coalesce,
            "max_instances": self.max_instances,
            "next_run_time": self.next_run_time,
        }

    def __setstate__(self, state):
        if state.get("version", 1) > 1:
            raise ValueError(
                f"Job has version {state['version']}, but only version 1 can be handled"
            )

        self.id = state["id"]
        self.func_ref = state["func"]
        self.func = ref_to_obj(self.func_ref)
        self.trigger = state["trigger"]
        self.executor = state["executor"]
        self.args = state["args"]
        self.kwargs = state["kwargs"]
        self.name = state["name"]
        self.misfire_grace_time = state["misfire_grace_time"]
        self.coalesce = state["coalesce"]
        self.max_instances = state["max_instances"]
        self.next_run_time = state["next_run_time"]

    def __eq__(self, other):
        if isinstance(other, Job):
            return self.id == other.id
        return NotImplemented

    def __repr__(self):
        return f"<Job (id={self.id} name={self.name})>"

    def __str__(self):
        if hasattr(self, "next_run_time"):
            status = (
                "next run at: " + datetime_repr(self.next_run_time)
                if self.next_run_time
                else "paused"
            )
        else:
            status = "pending"

        return f"{self.name} (trigger: {self.trigger}, {status})"
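add_job() returns one of these Job handles, and the textual-reference advice from __getstate__ matters whenever jobs are persisted. A usage sketch with the standard API (the module path is an illustrative assumption, not from this commit):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()

# A textual reference keeps the job serializable for persistent job stores.
job = scheduler.add_job("myapp.tasks:check_monitors", "interval", minutes=5)

job.modify(max_instances=2)            # validated by Job._modify()
job.reschedule("cron", minute="*/10")  # swap the trigger
job.pause()                            # next_run_time cleared -> "paused"
job.resume()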
141  venv/lib/python3.11/site-packages/apscheduler/jobstores/base.py  Normal file
@@ -0,0 +1,141 @@
import logging
from abc import ABCMeta, abstractmethod


class JobLookupError(KeyError):
    """Raised when the job store cannot find a job for update or removal."""

    def __init__(self, job_id):
        super().__init__(f"No job by the id of {job_id} was found")


class ConflictingIdError(KeyError):
    """Raised when the uniqueness of job IDs is being violated."""

    def __init__(self, job_id):
        super().__init__(f"Job identifier ({job_id}) conflicts with an existing job")


class TransientJobError(ValueError):
    """
    Raised when an attempt to add a transient job (one with no func_ref) to a persistent job
    store is detected.
    """

    def __init__(self, job_id):
        super().__init__(
            f"Job ({job_id}) cannot be added to this job store because a reference to the callable "
            "could not be determined."
        )


class BaseJobStore(metaclass=ABCMeta):
    """Abstract base class that defines the interface that every job store must implement."""

    _scheduler = None
    _alias = None
    _logger = logging.getLogger("apscheduler.jobstores")

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the job store is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this job store
        :param str|unicode alias: alias of this job store as it was assigned to the scheduler
        """

        self._scheduler = scheduler
        self._alias = alias
        self._logger = logging.getLogger(f"apscheduler.jobstores.{alias}")

    def shutdown(self):
        """Frees any resources still bound to this job store."""

    def _fix_paused_jobs_sorting(self, jobs):
        for i, job in enumerate(jobs):
            if job.next_run_time is not None:
                if i > 0:
                    paused_jobs = jobs[:i]
                    del jobs[:i]
                    jobs.extend(paused_jobs)
                break

    @abstractmethod
    def lookup_job(self, job_id):
        """
        Returns a specific job, or ``None`` if it isn't found.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned job to point to the scheduler and itself, respectively.

        :param str|unicode job_id: identifier of the job
        :rtype: Job
        """

    @abstractmethod
    def get_due_jobs(self, now):
        """
        Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
        The returned jobs must be sorted by next run time (ascending).

        :param datetime.datetime now: the current (timezone aware) datetime
        :rtype: list[Job]
        """

    @abstractmethod
    def get_next_run_time(self):
        """
        Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
        there are no active jobs.

        :rtype: datetime.datetime
        """

    @abstractmethod
    def get_all_jobs(self):
        """
        Returns a list of all jobs in this job store.
        The returned jobs should be sorted by next run time (ascending).
        Paused jobs (next_run_time == None) should be sorted last.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned jobs to point to the scheduler and itself, respectively.

        :rtype: list[Job]
        """

    @abstractmethod
    def add_job(self, job):
        """
        Adds the given job to this store.

        :param Job job: the job to add
        :raises ConflictingIdError: if there is another job in this store with the same ID
        """

    @abstractmethod
    def update_job(self, job):
        """
        Replaces the job in the store with the given newer version.

        :param Job job: the job to update
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_job(self, job_id):
        """
        Removes the given job from this store.

        :param str|unicode job_id: identifier of the job
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_all_jobs(self):
        """Removes all jobs from this store."""

    def __repr__(self):
        return f"<{self.__class__.__name__}>"
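Concrete stores implementing this interface are registered by alias at scheduler construction time. A configuration sketch with the standard API (not part of this diff; SQLAlchemyJobStore requires the sqlalchemy extra, and fits the project's SQLite setup):

from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {
    "default": SQLAlchemyJobStore(url="sqlite:///jobs.sqlite"),
    "transient": MemoryJobStore(),
}
scheduler = BackgroundScheduler(jobstores=jobstores)

# Jobs pick their store by alias; later lookups raise JobLookupError if missing.
scheduler.add_job(print, "interval", args=["tick"], seconds=30, jobstore="transient")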
170  venv/lib/python3.11/site-packages/apscheduler/jobstores/etcd.py  Normal file
@@ -0,0 +1,170 @@
import pickle
from datetime import datetime, timezone

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from etcd3 import Etcd3Client
except ImportError as exc:  # pragma: nocover
    raise ImportError("EtcdJobStore requires etcd3 be installed") from exc


class EtcdJobStore(BaseJobStore):
    """
    Stores jobs in etcd. Any leftover keyword arguments are directly passed to
    etcd3's `etcd3.client
    <https://python-etcd3.readthedocs.io/en/latest/readme.html>`_.

    Plugin alias: ``etcd``

    :param str path: path to store jobs in
    :param client: a :class:`~etcd3.client.etcd3` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to
        ``pickle.DEFAULT_PROTOCOL``
    """

    def __init__(
        self,
        path="/apscheduler",
        client=None,
        close_connection_on_exit=False,
        pickle_protocol=pickle.DEFAULT_PROTOCOL,
        **connect_args,
    ):
        super().__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = Etcd3Client(**connect_args)

    def lookup_job(self, job_id):
        node_path = self.path + "/" + str(job_id)
        try:
            content, _ = self.client.get(node_path)
            content = pickle.loads(content)
            job = self._reconstitute_job(content["job_state"])
            return job
        except BaseException:
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [
            job_record["job"]
            for job_record in self._get_jobs()
            if job_record["next_run_time"] is not None
            and job_record["next_run_time"] <= timestamp
        ]
        return jobs

    def get_next_run_time(self):
        next_runs = [
            job_record["next_run_time"]
            for job_record in self._get_jobs()
            if job_record["next_run_time"] is not None
        ]
        return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None

    def get_all_jobs(self):
        jobs = [job_record["job"] for job_record in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        node_path = self.path + "/" + str(job.id)
        value = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": job.__getstate__(),
        }
        data = pickle.dumps(value, self.pickle_protocol)
        status = self.client.put_if_not_exists(node_path, value=data)
        if not status:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        node_path = self.path + "/" + str(job.id)
        changes = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": job.__getstate__(),
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        status, _ = self.client.transaction(
            compare=[self.client.transactions.version(node_path) > 0],
            success=[self.client.transactions.put(node_path, value=data)],
            failure=[],
        )
        if not status:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        node_path = self.path + "/" + str(job_id)
        status, _ = self.client.transaction(
            compare=[self.client.transactions.version(node_path) > 0],
            success=[self.client.transactions.delete(node_path)],
            failure=[],
        )
        if not status:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.client.delete_prefix(self.path)

    def shutdown(self):
        self.client.close()

    def _reconstitute_job(self, job_state):
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        jobs = []
        failed_job_ids = []
        all_ids = list(self.client.get_prefix(self.path))

        for doc, _ in all_ids:
            try:
                content = pickle.loads(doc)
                job_record = {
                    "next_run_time": content["next_run_time"],
                    "job": self._reconstitute_job(content["job_state"]),
                }
                jobs.append(job_record)
            except BaseException:
                content = pickle.loads(doc)
                failed_id = content["job_state"]["id"]
                failed_job_ids.append(failed_id)
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', failed_id
                )

        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
        return sorted(
            jobs,
            key=lambda job_record: job_record["job"].next_run_time or paused_sort_key,
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} (client={self.client})>"
106  venv/lib/python3.11/site-packages/apscheduler/jobstores/memory.py  Normal file
@@ -0,0 +1,106 @@
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import datetime_to_utc_timestamp


class MemoryJobStore(BaseJobStore):
    """
    Stores jobs in an array in RAM. Provides no persistence support.

    Plugin alias: ``memory``
    """

    def __init__(self):
        super().__init__()
        # list of (job, timestamp), sorted by next_run_time and job id (ascending)
        self._jobs = []
        self._jobs_index = {}  # id -> (job, timestamp) lookup table

    def lookup_job(self, job_id):
        return self._jobs_index.get(job_id, (None, None))[0]

    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending

    def get_next_run_time(self):
        return self._jobs[0][0].next_run_time if self._jobs else None

    def get_all_jobs(self):
        return [j[0] for j in self._jobs]

    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)

    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)

    def remove_job(self, job_id):
        job, timestamp = self._jobs_index.get(job_id, (None, None))
        if job is None:
            raise JobLookupError(job_id)

        index = self._get_job_index(timestamp, job_id)
        del self._jobs[index]
        del self._jobs_index[job.id]

    def remove_all_jobs(self):
        self._jobs = []
        self._jobs_index = {}

    def shutdown(self):
        self.remove_all_jobs()

    def _get_job_index(self, timestamp, job_id):
        """
        Returns the index of the given job, or if it's not found, the index where the job should be
        inserted based on the given timestamp.

        :type timestamp: int
        :type job_id: str

        """
        lo, hi = 0, len(self._jobs)
        timestamp = float("inf") if timestamp is None else timestamp
        while lo < hi:
            mid = (lo + hi) // 2
            mid_job, mid_timestamp = self._jobs[mid]
            mid_timestamp = float("inf") if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                hi = mid
            elif mid_timestamp < timestamp:
                lo = mid + 1
            elif mid_job.id > job_id:
                hi = mid
            elif mid_job.id < job_id:
                lo = mid + 1
            else:
                return mid

        return lo
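_get_job_index() is a hand-rolled binary search over a (timestamp, job id) key, with None timestamps treated as infinity so paused jobs sort last. The same lookup expressed with the standard library's bisect module, as a standalone sketch (not part of this diff):

import bisect


def sort_key(timestamp, job_id):
    # Paused entries (timestamp None) compare greater than any real timestamp.
    return (float("inf") if timestamp is None else timestamp, job_id)


entries = [(1.0, "a"), (2.0, "b"), (None, "c")]  # already sorted
keys = [sort_key(ts, jid) for ts, jid in entries]
print(bisect.bisect_left(keys, sort_key(1.5, "x")))  # -> 1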
158  venv/lib/python3.11/site-packages/apscheduler/jobstores/mongodb.py  Normal file
@@ -0,0 +1,158 @@
import pickle
import warnings

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from bson.binary import Binary
    from pymongo import ASCENDING, MongoClient
    from pymongo.errors import DuplicateKeyError
except ImportError as exc:  # pragma: nocover
    raise ImportError("MongoDBJobStore requires PyMongo installed") from exc


class MongoDBJobStore(BaseJobStore):
    """
    Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
    pymongo's `MongoClient
    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.

    Plugin alias: ``mongodb``

    :param str database: database to store jobs in
    :param str collection: collection to store jobs in
    :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(
        self,
        database="apscheduler",
        collection="jobs",
        client=None,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        **connect_args,
    ):
        super().__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        if client:
            self.client = maybe_ref(client)
        else:
            connect_args.setdefault("w", 1)
            self.client = MongoClient(**connect_args)

        self.collection = self.client[database][collection]

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self.collection.create_index("next_run_time", sparse=True)

    @property
    def connection(self):
        warnings.warn(
            'The "connection" member is deprecated -- use "client" instead',
            DeprecationWarning,
        )
        return self.client

    def lookup_job(self, job_id):
        document = self.collection.find_one(job_id, ["job_state"])
        return self._reconstitute_job(document["job_state"]) if document else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs({"next_run_time": {"$lte": timestamp}})

    def get_next_run_time(self):
        document = self.collection.find_one(
            {"next_run_time": {"$ne": None}},
            projection=["next_run_time"],
            sort=[("next_run_time", ASCENDING)],
        )
        return (
            utc_timestamp_to_datetime(document["next_run_time"]) if document else None
        )

    def get_all_jobs(self):
        jobs = self._get_jobs({})
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        try:
            self.collection.insert_one(
                {
                    "_id": job.id,
                    "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
                    "job_state": Binary(
                        pickle.dumps(job.__getstate__(), self.pickle_protocol)
                    ),
                }
            )
        except DuplicateKeyError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)),
        }
        result = self.collection.update_one({"_id": job.id}, {"$set": changes})
        if result and result.matched_count == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        result = self.collection.delete_one({"_id": job_id})
        if result and result.deleted_count == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.collection.delete_many({})

    def shutdown(self):
        self.client.close()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, conditions):
        jobs = []
        failed_job_ids = []
        for document in self.collection.find(
            conditions, ["_id", "job_state"], sort=[("next_run_time", ASCENDING)]
        ):
            try:
                jobs.append(self._reconstitute_job(document["job_state"]))
            except BaseException:
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', document["_id"]
                )
                failed_job_ids.append(document["_id"])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.collection.delete_many({"_id": {"$in": failed_job_ids}})

        return jobs

    def __repr__(self):
        return f"<{self.__class__.__name__} (client={self.client})>"
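A configuration sketch for this store using the standard API (not part of this diff; the host kwarg is one of the "leftover" arguments forwarded to MongoClient):

from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

store = MongoDBJobStore(database="apscheduler", collection="jobs", host="localhost")
scheduler = BackgroundScheduler(jobstores={"default": store})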
160
venv/lib/python3.11/site-packages/apscheduler/jobstores/redis.py
Normal file
160
venv/lib/python3.11/site-packages/apscheduler/jobstores/redis.py
Normal file
|
|
@ -0,0 +1,160 @@
|
|||
import pickle
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from apscheduler.job import Job
|
||||
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
|
||||
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
|
||||
|
||||
try:
|
||||
from redis import Redis
|
||||
except ImportError as exc: # pragma: nocover
|
||||
raise ImportError("RedisJobStore requires redis installed") from exc
|
||||
|
||||
|
||||
class RedisJobStore(BaseJobStore):
|
||||
"""
|
||||
Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
|
||||
:class:`~redis.StrictRedis`.
|
||||
|
||||
Plugin alias: ``redis``
|
||||
|
||||
:param int db: the database number to store jobs in
|
||||
:param str jobs_key: key to store jobs in
|
||||
:param str run_times_key: key to store the jobs' run times in
|
||||
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
|
||||
highest available
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
db=0,
|
||||
jobs_key="apscheduler.jobs",
|
||||
run_times_key="apscheduler.run_times",
|
||||
pickle_protocol=pickle.HIGHEST_PROTOCOL,
|
||||
**connect_args,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if db is None:
|
||||
raise ValueError('The "db" parameter must not be empty')
|
||||
if not jobs_key:
|
||||
raise ValueError('The "jobs_key" parameter must not be empty')
|
||||
if not run_times_key:
|
||||
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = Redis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(job_states.items())
        paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(
                self.jobs_key,
                job.id,
                pickle.dumps(job.__getstate__(), self.pickle_protocol),
            )
            if job.next_run_time:
                pipe.zadd(
                    self.run_times_key,
                    {job.id: datetime_to_utc_timestamp(job.next_run_time)},
                )

            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(
                self.jobs_key,
                job.id,
                pickle.dumps(job.__getstate__(), self.pickle_protocol),
            )
            if job.next_run_time:
                pipe.zadd(
                    self.run_times_key,
                    {job.id: datetime_to_utc_timestamp(job.next_run_time)},
                )
            else:
                pipe.zrem(self.run_times_key, job.id)

            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _reconstitute_jobs(self, job_states):
        jobs = []
        failed_job_ids = []
        for job_id, job_state in job_states:
            try:
                jobs.append(self._reconstitute_job(job_state))
            except BaseException:
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', job_id
                )
                failed_job_ids.append(job_id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            with self.redis.pipeline() as pipe:
                pipe.hdel(self.jobs_key, *failed_job_ids)
                pipe.zrem(self.run_times_key, *failed_job_ids)
                pipe.execute()

        return jobs

    def __repr__(self):
        return f"<{self.__class__.__name__}>"
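The store splits each job across two Redis structures: a hash mapping job id to pickled job state, and a sorted set scoring job ids by their UTC run timestamp, which is what makes get_due_jobs a single ZRANGEBYSCORE. A minimal inspection sketch, assuming a local Redis and the store's default key names ("apscheduler.jobs" and "apscheduler.run_times"):

from redis import Redis

r = Redis()
# One hash entry per job: job id -> pickled job state
print(r.hkeys("apscheduler.jobs"))
# One sorted-set member per scheduled job: job id scored by its UTC run timestamp
print(r.zrange("apscheduler.run_times", 0, -1, withscores=True))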

venv/lib/python3.11/site-packages/apscheduler/jobstores/rethinkdb.py
@@ -0,0 +1,173 @@
import pickle

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from rethinkdb import RethinkDB
except ImportError as exc:  # pragma: nocover
    raise ImportError("RethinkDBJobStore requires rethinkdb installed") from exc


class RethinkDBJobStore(BaseJobStore):
    """
    Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
    rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.

    Plugin alias: ``rethinkdb``

    :param str database: database to store jobs in
    :param str table: table to store jobs in
    :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
        connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(
        self,
        database="apscheduler",
        table="jobs",
        client=None,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        **connect_args,
    ):
        super().__init__()

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not table:
            raise ValueError('The "table" parameter must not be empty')

        self.database = database
        self.table_name = table
        self.table = None
        self.client = client
        self.pickle_protocol = pickle_protocol
        self.connect_args = connect_args
        self.r = RethinkDB()
        self.conn = None

    def start(self, scheduler, alias):
        super().start(scheduler, alias)

        if self.client:
            self.conn = maybe_ref(self.client)
        else:
            self.conn = self.r.connect(db=self.database, **self.connect_args)

        if self.database not in self.r.db_list().run(self.conn):
            self.r.db_create(self.database).run(self.conn)

        if self.table_name not in self.r.table_list().run(self.conn):
            self.r.table_create(self.table_name).run(self.conn)

        if "next_run_time" not in self.r.table(self.table_name).index_list().run(
            self.conn
        ):
            self.r.table(self.table_name).index_create("next_run_time").run(self.conn)

        self.table = self.r.db(self.database).table(self.table_name)

    def lookup_job(self, job_id):
        results = list(self.table.get_all(job_id).pluck("job_state").run(self.conn))
        return self._reconstitute_job(results[0]["job_state"]) if results else None

    def get_due_jobs(self, now):
        return self._get_jobs(
            self.r.row["next_run_time"] <= datetime_to_utc_timestamp(now)
        )

    def get_next_run_time(self):
        results = list(
            self.table.filter(self.r.row["next_run_time"] != None)
            .order_by(self.r.asc("next_run_time"))
            .map(lambda x: x["next_run_time"])
            .limit(1)
            .run(self.conn)
        )
        return utc_timestamp_to_datetime(results[0]) if results else None

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        job_dict = {
            "id": job.id,
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": self.r.binary(
                pickle.dumps(job.__getstate__(), self.pickle_protocol)
            ),
        }
        results = self.table.insert(job_dict).run(self.conn)
        if results["errors"] > 0:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": self.r.binary(
                pickle.dumps(job.__getstate__(), self.pickle_protocol)
            ),
        }
        results = self.table.get_all(job.id).update(changes).run(self.conn)
        # "skipped" is True if any result counter is nonzero; if every counter is
        # zero (or the update was skipped or errored), no document matched the id
        skipped = False in map(lambda x: results[x] == 0, results.keys())
        if results["skipped"] > 0 or results["errors"] > 0 or not skipped:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        results = self.table.get_all(job_id).delete().run(self.conn)
        if results["deleted"] + results["skipped"] != 1:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.table.delete().run(self.conn)

    def shutdown(self):
        self.conn.close()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, predicate=None):
        jobs = []
        failed_job_ids = []
        query = (
            self.table.filter(self.r.row["next_run_time"] != None).filter(predicate)
            if predicate
            else self.table
        )
        query = query.order_by("next_run_time", "id").pluck("id", "job_state")

        for document in query.run(self.conn):
            try:
                jobs.append(self._reconstitute_job(document["job_state"]))
            except Exception:
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', document["id"]
                )
                failed_job_ids.append(document["id"])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.r.expr(failed_job_ids).for_each(
                lambda job_id: self.table.get_all(job_id).delete()
            ).run(self.conn)

        return jobs

    def __repr__(self):
        connection = self.conn
        return f"<{self.__class__.__name__} (connection={connection})>"
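A hedged usage sketch for wiring this store into a scheduler; it assumes a RethinkDB server on localhost. Note that start() does the heavy lifting: it connects and creates the database, table, and next_run_time index if they are missing.

from apscheduler.jobstores.rethinkdb import RethinkDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    jobstores={"default": RethinkDBJobStore(database="apscheduler")}
)
scheduler.start()  # opens the connection and ensures db/table/index exist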

venv/lib/python3.11/site-packages/apscheduler/jobstores/sqlalchemy.py
@@ -0,0 +1,194 @@
import pickle

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from sqlalchemy import (
        Column,
        Float,
        LargeBinary,
        MetaData,
        Table,
        Unicode,
        and_,
        create_engine,
        select,
    )
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.sql.expression import null
except ImportError as exc:  # pragma: nocover
    raise ImportError("SQLAlchemyJobStore requires SQLAlchemy installed") from exc


class SQLAlchemyJobStore(BaseJobStore):
    """
    Stores jobs in a database table using SQLAlchemy.
    The table will be created if it doesn't exist in the database.

    Plugin alias: ``sqlalchemy``

    :param str url: connection string (see
        :ref:`SQLAlchemy documentation <sqlalchemy:database_urls>` on this)
    :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
        new one based on ``url``
    :param str tablename: name of the table to store jobs in
    :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
        new one
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    :param str tableschema: name of the (existing) schema in the target database where the table
        should be
    :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
        (ignored if ``engine`` is given)
    """

    def __init__(
        self,
        url=None,
        engine=None,
        tablename="apscheduler_jobs",
        metadata=None,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        tableschema=None,
        engine_options=None,
    ):
        super().__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url, **(engine_options or {}))
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename,
            metadata,
            Column("id", Unicode(191), primary_key=True),
            Column("next_run_time", Float(25), index=True),
            Column("job_state", LargeBinary, nullable=False),
            schema=tableschema,
        )

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self.jobs_t.create(self.engine, checkfirst=True)

    def lookup_job(self, job_id):
        selectable = select(self.jobs_t.c.job_state).where(self.jobs_t.c.id == job_id)
        with self.engine.begin() as connection:
            job_state = connection.execute(selectable).scalar()
            return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)

    def get_next_run_time(self):
        selectable = (
            select(self.jobs_t.c.next_run_time)
            .where(self.jobs_t.c.next_run_time != null())
            .order_by(self.jobs_t.c.next_run_time)
            .limit(1)
        )
        with self.engine.begin() as connection:
            next_run_time = connection.execute(selectable).scalar()
            return utc_timestamp_to_datetime(next_run_time)

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        insert = self.jobs_t.insert().values(
            **{
                "id": job.id,
                "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
                "job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
            }
        )
        with self.engine.begin() as connection:
            try:
                connection.execute(insert)
            except IntegrityError:
                raise ConflictingIdError(job.id)

    def update_job(self, job):
        update = (
            self.jobs_t.update()
            .values(
                **{
                    "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
                    "job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
                }
            )
            .where(self.jobs_t.c.id == job.id)
        )
        with self.engine.begin() as connection:
            result = connection.execute(update)
            if result.rowcount == 0:
                raise JobLookupError(job.id)

    def remove_job(self, job_id):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
        with self.engine.begin() as connection:
            result = connection.execute(delete)
            if result.rowcount == 0:
                raise JobLookupError(job_id)

    def remove_all_jobs(self):
        delete = self.jobs_t.delete()
        with self.engine.begin() as connection:
            connection.execute(delete)

    def shutdown(self):
        self.engine.dispose()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job_state["jobstore"] = self
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, *conditions):
        jobs = []
        selectable = select(self.jobs_t.c.id, self.jobs_t.c.job_state).order_by(
            self.jobs_t.c.next_run_time
        )
        selectable = selectable.where(and_(*conditions)) if conditions else selectable
        failed_job_ids = set()
        with self.engine.begin() as connection:
            for row in connection.execute(selectable):
                try:
                    jobs.append(self._reconstitute_job(row.job_state))
                except BaseException:
                    self._logger.exception(
                        'Unable to restore job "%s" -- removing it', row.id
                    )
                    failed_job_ids.add(row.id)

            # Remove all the jobs we failed to restore
            if failed_job_ids:
                delete = self.jobs_t.delete().where(
                    self.jobs_t.c.id.in_(failed_job_ids)
                )
                connection.execute(delete)

        return jobs

    def __repr__(self):
        return f"<{self.__class__.__name__} (url={self.engine.url})>"
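A minimal sketch of the most common configuration: persisting jobs to SQLite through this store. The URL, table defaults, and sample job are illustrative.

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {"default": SQLAlchemyJobStore(url="sqlite:///jobs.sqlite")}
scheduler = BackgroundScheduler(jobstores=jobstores)
scheduler.add_job(print, "interval", seconds=30, args=["tick"], id="tick")
scheduler.start()  # creates the apscheduler_jobs table on first run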

venv/lib/python3.11/site-packages/apscheduler/jobstores/zookeeper.py
@@ -0,0 +1,197 @@
import pickle
from datetime import datetime, timezone

from kazoo.exceptions import NodeExistsError, NoNodeError

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from kazoo.client import KazooClient
except ImportError as exc:  # pragma: nocover
    raise ImportError("ZooKeeperJobStore requires Kazoo installed") from exc


class ZooKeeperJobStore(BaseJobStore):
    """
    Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
    kazoo's `KazooClient
    <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.

    Plugin alias: ``zookeeper``

    :param str path: path to store jobs in
    :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(
        self,
        path="/apscheduler",
        client=None,
        close_connection_on_exit=False,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        **connect_args,
    ):
        super().__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False

    def _ensure_paths(self):
        if not self._ensured_path:
            self.client.ensure_path(self.path)
        self._ensured_path = True

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        if not self.client.connected:
            self.client.start()

    def lookup_job(self, job_id):
        self._ensure_paths()
        node_path = self.path + "/" + str(job_id)
        try:
            content, _ = self.client.get(node_path)
            doc = pickle.loads(content)
            job = self._reconstitute_job(doc["job_state"])
            return job
        except BaseException:
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [
            job_def["job"]
            for job_def in self._get_jobs()
            if job_def["next_run_time"] is not None
            and job_def["next_run_time"] <= timestamp
        ]
        return jobs

    def get_next_run_time(self):
        next_runs = [
            job_def["next_run_time"]
            for job_def in self._get_jobs()
            if job_def["next_run_time"] is not None
        ]
        return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None

    def get_all_jobs(self):
        jobs = [job_def["job"] for job_def in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        self._ensure_paths()
        node_path = self.path + "/" + str(job.id)
        value = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": job.__getstate__(),
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        self._ensure_paths()
        node_path = self.path + "/" + str(job.id)
        changes = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": job.__getstate__(),
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = self.path + "/" + str(job_id)
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = self.path + "/" + node_name
                content, stat = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    "job_id": node_name,
                    "next_run_time": doc["next_run_time"]
                    if doc["next_run_time"]
                    else None,
                    "job_state": doc["job_state"],
                    "job": self._reconstitute_job(doc["job_state"]),
                    "creation_time": stat.ctime,
                }
                jobs.append(job_def)
            except BaseException:
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', node_name
                )
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
        return sorted(
            jobs,
            key=lambda job_def: (
                job_def["job"].next_run_time or paused_sort_key,
                job_def["creation_time"],
            ),
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} (client={self.client})>"
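Since every job lives in one znode under the base path, the layout is easy to inspect with kazoo directly. A sketch assuming the default "/apscheduler" path and a hypothetical local ensemble:

from kazoo.client import KazooClient

zk = KazooClient(hosts="127.0.0.1:2181")
zk.start()
# One child znode per job, holding a pickled dict with "next_run_time" and "job_state"
for job_id in zk.get_children("/apscheduler"):
    data, stat = zk.get("/apscheduler/" + job_id)
    print(job_id, len(data), "bytes, created", stat.ctime)
zk.stop()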

venv/lib/python3.11/site-packages/apscheduler/schedulers/__init__.py
@@ -0,0 +1,12 @@
class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return "Scheduler is already running"


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shut down the scheduler when it's not running."""

    def __str__(self):
        return "Scheduler is not running"

venv/lib/python3.11/site-packages/apscheduler/schedulers/asyncio.py
@@ -0,0 +1,75 @@
import asyncio
from functools import partial, wraps

from apscheduler.schedulers import SchedulerNotRunningError
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref


def run_in_event_loop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        wrapped = partial(func, self, *args, **kwargs)
        self._eventloop.call_soon_threadsafe(wrapped)

    return wrapper


class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    The default executor can run jobs based on native coroutines (``async def``).

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """

    _eventloop = None
    _timeout = None

    def start(self, paused=False):
        if not self._eventloop or self._eventloop.is_closed():
            self._eventloop = asyncio.get_running_loop()

        super().start(paused)

    @run_in_event_loop
    def _shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()
        self._eventloop = None

    def shutdown(self, wait=True):
        if not self.running:
            raise SchedulerNotRunningError

        self._shutdown(wait)

    def _configure(self, config):
        self._eventloop = maybe_ref(config.pop("event_loop", None))
        super()._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._timeout.cancel()
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.asyncio import AsyncIOExecutor

        return AsyncIOExecutor()
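A minimal sketch of driving the scheduler from asyncio code; start() must run inside a live event loop because it calls asyncio.get_running_loop(). The job and timings are illustrative.

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def tick():
    print("tick")

async def main():
    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, "interval", seconds=3)
    scheduler.start()          # schedules timers on the running loop
    await asyncio.sleep(10)    # let a few runs happen
    scheduler.shutdown(wait=False)

asyncio.run(main())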

venv/lib/python3.11/site-packages/apscheduler/schedulers/background.py
@@ -0,0 +1,42 @@
from threading import Event, Thread

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool


class BackgroundScheduler(BlockingScheduler):
    """
    A scheduler that runs in the background using a separate thread
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).

    Extra options:

    ========== =============================================================================
    ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
               `the documentation
               <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
               for further details)
    ========== =============================================================================
    """

    _thread = None

    def _configure(self, config):
        self._daemon = asbool(config.pop("daemon", True))
        super()._configure(config)

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        BaseScheduler.start(self, *args, **kwargs)
        self._thread = Thread(
            target=self._main_loop, name="APScheduler", daemon=self._daemon
        )
        self._thread.start()

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._thread.join()
        del self._thread
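Usage sketch: because start() returns immediately and the worker thread is a daemon by default, the caller has to keep the process alive. The loop below is illustrative.

import time
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.add_job(print, "interval", seconds=5, args=["still here"])
scheduler.start()  # returns immediately; jobs run on the APScheduler thread
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    scheduler.shutdown()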

venv/lib/python3.11/site-packages/apscheduler/schedulers/base.py (1264 lines, new file)
File diff suppressed because it is too large

venv/lib/python3.11/site-packages/apscheduler/schedulers/blocking.py
@@ -0,0 +1,33 @@
from threading import TIMEOUT_MAX, Event

from apscheduler.schedulers.base import STATE_STOPPED, BaseScheduler


class BlockingScheduler(BaseScheduler):
    """
    A scheduler that runs in the foreground
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
    """

    _event = None

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        super().start(*args, **kwargs)
        self._main_loop()

    def shutdown(self, wait=True):
        super().shutdown(wait)
        self._event.set()

    def _main_loop(self):
        wait_seconds = TIMEOUT_MAX
        while self.state != STATE_STOPPED:
            self._event.wait(wait_seconds)
            self._event.clear()
            wait_seconds = self._process_jobs()

    def wakeup(self):
        self._event.set()
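A minimal sketch of the blocking variant: start() parks the calling thread in _main_loop(), which sleeps on the internal Event until wakeup() or shutdown() sets it.

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()
scheduler.add_job(print, "cron", minute="*", args=["every minute"])
scheduler.start()  # blocks here until shutdown() is called from a job or signal handler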

venv/lib/python3.11/site-packages/apscheduler/schedulers/gevent.py
@@ -0,0 +1,34 @@
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

try:
    import gevent
    from gevent.event import Event
    from gevent.lock import RLock
except ImportError as exc:  # pragma: nocover
    raise ImportError("GeventScheduler requires gevent installed") from exc


class GeventScheduler(BlockingScheduler):
    """A scheduler that runs as a Gevent greenlet."""

    _greenlet = None

    def start(self, *args, **kwargs):
        self._event = Event()
        BaseScheduler.start(self, *args, **kwargs)
        self._greenlet = gevent.spawn(self._main_loop)
        return self._greenlet

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._greenlet.join()
        del self._greenlet

    def _create_lock(self):
        return RLock()

    def _create_default_executor(self):
        from apscheduler.executors.gevent import GeventExecutor

        return GeventExecutor()

venv/lib/python3.11/site-packages/apscheduler/schedulers/qt.py
@@ -0,0 +1,44 @@
from importlib import import_module
from itertools import product

from apscheduler.schedulers.base import BaseScheduler

for version, pkgname in product(range(6, 1, -1), ("PySide", "PyQt")):
    try:
        qtcore = import_module(pkgname + str(version) + ".QtCore")
    except ImportError:
        pass
    else:
        QTimer = qtcore.QTimer
        break
else:
    raise ImportError("QtScheduler requires either PySide/PyQt (v6 to v2) installed")


class QtScheduler(BaseScheduler):
    """A scheduler that runs in a Qt event loop."""

    _timer = None

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            wait_time = min(int(wait_seconds * 1000), 2147483647)
            self._timer = QTimer.singleShot(wait_time, self._process_jobs)

    def _stop_timer(self):
        if self._timer:
            if self._timer.isActive():
                self._timer.stop()
            del self._timer

    def wakeup(self):
        self._start_timer(0)

    def _process_jobs(self):
        wait_seconds = super()._process_jobs()
        self._start_timer(wait_seconds)

venv/lib/python3.11/site-packages/apscheduler/schedulers/tornado.py
@@ -0,0 +1,75 @@
from datetime import timedelta
from functools import wraps

from apscheduler.schedulers import SchedulerNotRunningError
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from tornado.ioloop import IOLoop
except ImportError as exc:  # pragma: nocover
    raise ImportError("TornadoScheduler requires tornado installed") from exc


def run_in_ioloop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self._ioloop is None:
            raise SchedulerNotRunningError

        self._ioloop.add_callback(func, self, *args, **kwargs)

    return wrapper


class TornadoScheduler(BaseScheduler):
    """
    A scheduler that runs on a Tornado IOLoop.

    The default executor can run jobs based on native coroutines (``async def``).

    =========== ===============================================================
    ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
    =========== ===============================================================
    """

    _ioloop = None
    _timeout = None

    @run_in_ioloop
    def _shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()

    def shutdown(self, wait=True):
        if not self.running:
            raise SchedulerNotRunningError

        self._shutdown(wait)

    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop("io_loop", None)) or IOLoop.current()
        super()._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._ioloop.add_timeout(
                timedelta(seconds=wait_seconds), self.wakeup
            )

    def _stop_timer(self):
        if self._timeout:
            self._ioloop.remove_timeout(self._timeout)
            del self._timeout

    def _create_default_executor(self):
        from apscheduler.executors.tornado import TornadoExecutor

        return TornadoExecutor()

    @run_in_ioloop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

venv/lib/python3.11/site-packages/apscheduler/schedulers/twisted.py
@@ -0,0 +1,69 @@
from functools import wraps

from apscheduler.schedulers import SchedulerNotRunningError
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from twisted.internet import reactor as default_reactor
except ImportError as exc:  # pragma: nocover
    raise ImportError("TwistedScheduler requires Twisted installed") from exc


def run_in_reactor(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._reactor.callFromThread(func, self, *args, **kwargs)

    return wrapper


class TwistedScheduler(BaseScheduler):
    """
    A scheduler that runs on a Twisted reactor.

    Extra options:

    =========== ========================================================
    ``reactor`` Reactor instance to use (defaults to the global reactor)
    =========== ========================================================
    """

    _reactor = None
    _delayedcall = None

    def _configure(self, config):
        self._reactor = maybe_ref(config.pop("reactor", default_reactor))
        super()._configure(config)

    @run_in_reactor
    def _shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()

    def shutdown(self, wait=True):
        if not self.running:
            raise SchedulerNotRunningError

        self._shutdown(wait)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._delayedcall and self._delayedcall.active():
            self._delayedcall.cancel()
            del self._delayedcall

    @run_in_reactor
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.twisted import TwistedExecutor

        return TwistedExecutor()

venv/lib/python3.11/site-packages/apscheduler/triggers/base.py
@@ -0,0 +1,35 @@
import random
from abc import ABCMeta, abstractmethod
from datetime import timedelta


class BaseTrigger(metaclass=ABCMeta):
    """Abstract base class that defines the interface that every trigger must implement."""

    __slots__ = ()

    @abstractmethod
    def get_next_fire_time(self, previous_fire_time, now):
        """
        Returns the next datetime to fire on. If no such datetime can be calculated, returns
        ``None``.

        :param datetime.datetime previous_fire_time: the previous time the trigger was fired
        :param datetime.datetime now: current datetime
        """

    def _apply_jitter(self, next_fire_time, jitter, now):
        """
        Randomize ``next_fire_time`` by adding a random value (the jitter).

        :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
            ``None``, returns ``None``.
        :param int|None jitter: maximum number of seconds to add to ``next_fire_time``
            (if ``None`` or ``0``, returns ``next_fire_time``)
        :param datetime.datetime now: current datetime
        :return datetime.datetime|None: next fire time with a jitter.
        """
        if next_fire_time is None or not jitter:
            return next_fire_time

        return next_fire_time + timedelta(seconds=random.uniform(0, jitter))
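A small sketch of what _apply_jitter does to a fire time: the result is the original time pushed forward by a uniform random 0..jitter seconds (seeded here only to make the output reproducible).

import random
from datetime import datetime, timedelta

random.seed(0)  # deterministic for the example only
next_fire_time = datetime(2024, 1, 1, 12, 0, 0)
jitter = 30
jittered = next_fire_time + timedelta(seconds=random.uniform(0, jitter))
print(jittered)  # 2024-01-01 12:00:25.332... (somewhere in the 30-second window)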

venv/lib/python3.11/site-packages/apscheduler/triggers/calendarinterval.py
@@ -0,0 +1,186 @@
from __future__ import annotations

from datetime import date, datetime, time, timedelta, tzinfo
from typing import Any

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
    asdate,
    astimezone,
    timezone_repr,
)


class CalendarIntervalTrigger(BaseTrigger):
    """
    Runs the task on specified calendar-based intervals always at the same exact time of
    day.

    When calculating the next date, the ``years`` and ``months`` parameters are first
    added to the previous date while keeping the day of the month constant. This is
    repeated until the resulting date is valid. After that, the ``weeks`` and ``days``
    parameters are added to that date. Finally, the date is combined with the given time
    (hour, minute, second) to form the final datetime.

    This means that if the ``days`` or ``weeks`` parameters are not used, the task will
    always be executed on the same day of the month at the same wall clock time,
    assuming the date and time are valid.

    If the resulting datetime is invalid due to a daylight saving forward shift, the
    date is discarded and the process moves on to the next date. If instead the datetime
    is ambiguous due to a backward DST shift, the earlier of the two resulting datetimes
    is used.

    If no previous run time is specified when requesting a new run time (like when
    starting for the first time or resuming after being paused), ``start_date`` is used
    as a reference and the next valid datetime equal to or later than the current time
    will be returned. Otherwise, the next valid datetime starting from the previous run
    time is returned, even if it's in the past.

    .. warning:: Be wary of setting a start date near the end of the month (the 29th-31st)
        if you have ``months`` specified in your interval, as this will skip the months
        when those days do not exist. Likewise, setting the start date on the leap day
        (February 29th) and having ``years`` defined may cause some years to be skipped.

        Users are also discouraged from using a time inside the target timezone's DST
        switching period (typically around 2 am) since a date could either be skipped or
        repeated due to the specified wall clock time either occurring twice or not at
        all.

    :param years: number of years to wait
    :param months: number of months to wait
    :param weeks: number of weeks to wait
    :param days: number of days to wait
    :param hour: hour to run the task at
    :param minute: minute to run the task at
    :param second: second to run the task at
    :param start_date: first date to trigger on (defaults to current date if omitted)
    :param end_date: latest possible date to trigger on
    :param timezone: time zone to use for calculating the next fire time (defaults
        to scheduler timezone if created via the scheduler, otherwise the local time
        zone)
    :param jitter: delay the job execution by ``jitter`` seconds at most
    """

    __slots__ = (
        "_time",
        "days",
        "end_date",
        "jitter",
        "months",
        "start_date",
        "timezone",
        "weeks",
        "years",
    )

    def __init__(
        self,
        *,
        years: int = 0,
        months: int = 0,
        weeks: int = 0,
        days: int = 0,
        hour: int = 0,
        minute: int = 0,
        second: int = 0,
        start_date: date | str | None = None,
        end_date: date | str | None = None,
        timezone: str | tzinfo | None = None,
        jitter: int | None = None,
    ):
        if timezone:
            self.timezone = astimezone(timezone)
        else:
            self.timezone = astimezone(get_localzone())

        self.years = years
        self.months = months
        self.weeks = weeks
        self.days = days
        self.start_date = asdate(start_date) or date.today()
        self.end_date = asdate(end_date)
        self.jitter = jitter
        self._time = time(hour, minute, second, tzinfo=self.timezone)

        if self.years == self.months == self.weeks == self.days == 0:
            raise ValueError("interval must be at least 1 day long")

        if self.end_date and self.start_date > self.end_date:
            raise ValueError("end_date cannot be earlier than start_date")

    def get_next_fire_time(
        self, previous_fire_time: datetime | None, now: datetime
    ) -> datetime | None:
        while True:
            if previous_fire_time:
                year, month = previous_fire_time.year, previous_fire_time.month
                while True:
                    month += self.months
                    year += self.years + (month - 1) // 12
                    month = (month - 1) % 12 + 1
                    try:
                        next_date = date(year, month, previous_fire_time.day)
                    except ValueError:
                        pass  # Nonexistent date
                    else:
                        next_date += timedelta(self.days + self.weeks * 7)
                        break
            else:
                next_date = self.start_date

            # Don't return any date past end_date
            if self.end_date and next_date > self.end_date:
                return None

            # Combine the date with the designated time and normalize the result
            timestamp = datetime.combine(next_date, self._time).timestamp()
            next_time = datetime.fromtimestamp(timestamp, self.timezone)

            # Check if the time is off due to normalization and a forward DST shift
            if next_time.timetz() != self._time:
                previous_fire_time = next_time.date()
            else:
                return self._apply_jitter(next_time, self.jitter, now)

    def __getstate__(self) -> dict[str, Any]:
        return {
            "version": 1,
            "interval": [self.years, self.months, self.weeks, self.days],
            "time": [self._time.hour, self._time.minute, self._time.second],
            "start_date": self.start_date,
            "end_date": self.end_date,
            "timezone": self.timezone,
            "jitter": self.jitter,
        }

    def __setstate__(self, state: dict[str, Any]) -> None:
        if state.get("version", 1) > 1:
            raise ValueError(
                f"Got serialized data for version {state['version']} of "
                f"{self.__class__.__name__}, but only versions up to 1 can be handled"
            )

        self.years, self.months, self.weeks, self.days = state["interval"]
        self.start_date = state["start_date"]
        self.end_date = state["end_date"]
        self.timezone = state["timezone"]
        self.jitter = state["jitter"]
        self._time = time(*state["time"], tzinfo=self.timezone)

    def __repr__(self) -> str:
        fields = []
        for field in "years", "months", "weeks", "days":
            value = getattr(self, field)
            if value > 0:
                fields.append(f"{field}={value}")

        fields.append(f"time={self._time.isoformat()!r}")
        fields.append(f"start_date='{self.start_date}'")
        if self.end_date:
            fields.append(f"end_date='{self.end_date}'")

        fields.append(f"timezone={timezone_repr(self.timezone)!r}")
        return f"{self.__class__.__name__}({', '.join(fields)})"
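A hedged example of constructing the trigger directly and asking it for a fire time; the one-month interval and 09:00 wall-clock time are illustrative.

from datetime import datetime
from apscheduler.triggers.calendarinterval import CalendarIntervalTrigger

trigger = CalendarIntervalTrigger(months=1, hour=9)
print(trigger)  # repr shows months=1, time='09:00:00', start_date, timezone
# With no previous fire time, the start date (today by default) combined with 09:00
print(trigger.get_next_fire_time(None, datetime.now(trigger.timezone)))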

venv/lib/python3.11/site-packages/apscheduler/triggers/combining.py
@@ -0,0 +1,114 @@
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import obj_to_ref, ref_to_obj


class BaseCombiningTrigger(BaseTrigger):
    __slots__ = ("jitter", "triggers")

    def __init__(self, triggers, jitter=None):
        self.triggers = triggers
        self.jitter = jitter

    def __getstate__(self):
        return {
            "version": 1,
            "triggers": [
                (obj_to_ref(trigger.__class__), trigger.__getstate__())
                for trigger in self.triggers
            ],
            "jitter": self.jitter,
        }

    def __setstate__(self, state):
        if state.get("version", 1) > 1:
            raise ValueError(
                f"Got serialized data for version {state['version']} of "
                f"{self.__class__.__name__}, but only versions up to 1 can be handled"
            )

        self.jitter = state["jitter"]
        self.triggers = []
        for clsref, trigger_state in state["triggers"]:
            cls = ref_to_obj(clsref)
            trigger = cls.__new__(cls)
            trigger.__setstate__(trigger_state)
            self.triggers.append(trigger)

    def __repr__(self):
        return "<{}({}{})>".format(
            self.__class__.__name__,
            self.triggers,
            f", jitter={self.jitter}" if self.jitter else "",
        )


class AndTrigger(BaseCombiningTrigger):
    """
    Always returns the earliest next fire time that all the given triggers can agree on.
    The trigger is considered to be finished when any of the given triggers has finished its
    schedule.

    Trigger alias: ``and``

    .. warning:: This trigger should only be used to combine triggers that fire on
        specific times of day, such as
        :class:`~apscheduler.triggers.cron.CronTrigger` and
        :class:`~apscheduler.triggers.calendarinterval.CalendarIntervalTrigger`.
        Attempting to use it with
        :class:`~apscheduler.triggers.interval.IntervalTrigger` will likely result in
        the scheduler hanging as it tries to find a fire time that matches exactly
        between fire times produced by all the given triggers.

    :param list triggers: triggers to combine
    :param int|None jitter: delay the job execution by ``jitter`` seconds at most
    """

    __slots__ = ()

    def get_next_fire_time(self, previous_fire_time, now):
        while True:
            fire_times = [
                trigger.get_next_fire_time(previous_fire_time, now)
                for trigger in self.triggers
            ]
            if None in fire_times:
                return None
            elif min(fire_times) == max(fire_times):
                return self._apply_jitter(fire_times[0], self.jitter, now)
            else:
                now = max(fire_times)

    def __str__(self):
        return "and[{}]".format(", ".join(str(trigger) for trigger in self.triggers))


class OrTrigger(BaseCombiningTrigger):
    """
    Always returns the earliest next fire time produced by any of the given triggers.
    The trigger is considered finished when all the given triggers have finished their schedules.

    Trigger alias: ``or``

    :param list triggers: triggers to combine
    :param int|None jitter: delay the job execution by ``jitter`` seconds at most

    .. note:: Triggers that depend on the previous fire time, such as the interval trigger, may
        seem to behave strangely since they are always passed the previous fire time produced by
        any of the given triggers.
    """

    __slots__ = ()

    def get_next_fire_time(self, previous_fire_time, now):
        fire_times = [
            trigger.get_next_fire_time(previous_fire_time, now)
            for trigger in self.triggers
        ]
        fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
        if fire_times:
            return self._apply_jitter(min(fire_times), self.jitter, now)
        else:
            return None

    def __str__(self):
        return "or[{}]".format(", ".join(str(trigger) for trigger in self.triggers))
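A hedged example of the combining pattern the docstrings describe: an OrTrigger that fires on the earlier of two cron schedules (Saturdays at 08:00 or Sundays at 15:00).

from apscheduler.triggers.combining import OrTrigger
from apscheduler.triggers.cron import CronTrigger

trigger = OrTrigger(
    [
        CronTrigger(day_of_week="sat", hour=8),
        CronTrigger(day_of_week="sun", hour=15),
    ]
)
print(trigger)  # or[cron[...], cron[...]]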

venv/lib/python3.11/site-packages/apscheduler/triggers/cron/__init__.py
@@ -0,0 +1,301 @@
from datetime import datetime, timedelta, timezone

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import (
    DEFAULT_VALUES,
    BaseField,
    DayOfMonthField,
    DayOfWeekField,
    MonthField,
    WeekField,
)
from apscheduler.util import (
    astimezone,
    convert_to_datetime,
    datetime_ceil,
    datetime_repr,
    datetime_utc_add,
)

UTC = timezone.utc


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints,
    similarly to how the UNIX cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
        to scheduler timezone)
    :param int|None jitter: delay the job execution by ``jitter`` seconds at most

    .. note:: The first weekday is always **monday**.
    """

    FIELD_NAMES = (
        "year",
        "month",
        "day",
        "week",
        "day_of_week",
        "hour",
        "minute",
        "second",
    )
    FIELDS_MAP = {
        "year": BaseField,
        "month": MonthField,
        "week": WeekField,
        "day": DayOfMonthField,
        "day_of_week": DayOfWeekField,
        "hour": BaseField,
        "minute": BaseField,
        "second": BaseField,
    }

    __slots__ = "end_date", "fields", "jitter", "start_date", "timezone"

    def __init__(
        self,
        year=None,
        month=None,
        day=None,
        week=None,
        day_of_week=None,
        hour=None,
        minute=None,
        second=None,
        start_date=None,
        end_date=None,
        timezone=None,
        jitter=None,
    ):
        if timezone:
            self.timezone = astimezone(timezone)
        elif isinstance(start_date, datetime) and start_date.tzinfo:
            self.timezone = astimezone(start_date.tzinfo)
        elif isinstance(end_date, datetime) and end_date.tzinfo:
            self.timezone = astimezone(end_date.tzinfo)
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, "start_date")
        self.end_date = convert_to_datetime(end_date, self.timezone, "end_date")

        self.jitter = jitter

        values = dict(
            (key, value)
            for (key, value) in locals().items()
            if key in self.FIELD_NAMES and value is not None
        )
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = "*"
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    @classmethod
    def from_crontab(cls, expr, timezone=None):
        """
        Create a :class:`~CronTrigger` from a standard crontab expression.

        See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.

        :param expr: minute, hour, day of month, month, day of week
        :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (
            defaults to scheduler timezone)
        :return: a :class:`~CronTrigger` instance
        """
        values = expr.split()
        if len(values) != 5:
            raise ValueError(f"Wrong number of fields; got {len(values)}, expected 5")

        return cls(
            minute=values[0],
            hour=values[1],
            day=values[2],
            month=values[3],
            day_of_week=values[4],
            timezone=timezone,
        )

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields to their minimum
        values.

        :type dateval: datetime
        :type fieldnum: int
        :return: a tuple containing the new date, and the number of the field that was actually
            incremented
        :rtype: tuple
        """

        values = {}
        i = 0
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        dateval = datetime_utc_add(dateval, difference)
        return dateval, fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        return datetime(**values, tzinfo=self.timezone, fold=dateval.fold)

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            start_date = min(
                now.astimezone(UTC),
                datetime_utc_add(
                    previous_fire_time, timedelta(microseconds=1)
                ).astimezone(UTC),
            ).astimezone(self.timezone)
            if start_date == previous_fire_time:
                start_date = datetime_utc_add(start_date, timedelta(microseconds=1))
        else:
            start_date = (
                max(now.astimezone(UTC), self.start_date.astimezone(UTC)).astimezone(
                    self.timezone
                )
                if self.start_date
                else now
            )

        fieldnum = 0
        next_date = datetime_ceil(start_date).astimezone(self.timezone)
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(
                    next_date, fieldnum - 1
                )
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum, next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(
                        next_date, fieldnum
                    )
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

            # Return if the date has rolled past the end date
            if self.end_date and next_date > self.end_date:
                return None

        if fieldnum >= 0:
            next_date = self._apply_jitter(next_date, self.jitter, now)
            return min(next_date, self.end_date) if self.end_date else next_date

    def __getstate__(self):
        return {
            "version": 2,
            "timezone": self.timezone,
            "start_date": self.start_date,
            "end_date": self.end_date,
            "fields": self.fields,
            "jitter": self.jitter,
        }

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get("version", 1) > 2:
            raise ValueError(
                f"Got serialized data for version {state['version']} of "
                f"{self.__class__.__name__}, but only versions up to 2 can be handled"
            )

        self.timezone = astimezone(state["timezone"])
        self.start_date = state["start_date"]
        self.end_date = state["end_date"]
        self.fields = state["fields"]
        self.jitter = state.get("jitter")

    def __str__(self):
        options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
        return "cron[{}]".format(", ".join(options))

    def __repr__(self):
        options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
        if self.start_date:
            options.append(f"start_date={datetime_repr(self.start_date)!r}")
        if self.end_date:
            options.append(f"end_date={datetime_repr(self.end_date)!r}")
        if self.jitter:
            options.append(f"jitter={self.jitter}")

        return "<{} ({}, timezone='{}')>".format(
            self.__class__.__name__,
            ", ".join(options),
            self.timezone,
        )
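A short sketch of from_crontab(): the five whitespace-separated fields map to minute, hour, day of month, month, and day of week, in that order.

from apscheduler.triggers.cron import CronTrigger

trigger = CronTrigger.from_crontab("30 6 * * mon-fri")  # 06:30 on weekdays
print(trigger)  # cron[month='*', day='*', day_of_week='mon-fri', hour='6', minute='30']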

venv/lib/python3.11/site-packages/apscheduler/triggers/cron/expressions.py
@@ -0,0 +1,285 @@
"""This module contains the expressions applicable for CronTrigger's fields."""

__all__ = (
|
||||
"AllExpression",
|
||||
"LastDayOfMonthExpression",
|
||||
"RangeExpression",
|
||||
"WeekdayPositionExpression",
|
||||
"WeekdayRangeExpression",
|
||||
)
|
||||
|
||||
import re
|
||||
from calendar import monthrange
|
||||
|
||||
from apscheduler.util import asint
|
||||
|
||||
WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
|
||||
MONTHS = [
|
||||
"jan",
|
||||
"feb",
|
||||
"mar",
|
||||
"apr",
|
||||
"may",
|
||||
"jun",
|
||||
"jul",
|
||||
"aug",
|
||||
"sep",
|
||||
"oct",
|
||||
"nov",
|
||||
"dec",
|
||||
]
|
||||
|
||||
|
||||
class AllExpression:
|
||||
value_re = re.compile(r"\*(?:/(?P<step>\d+))?$")
|
||||
|
||||
def __init__(self, step=None):
|
||||
self.step = asint(step)
|
||||
if self.step == 0:
|
||||
raise ValueError("Increment must be higher than 0")
|
||||
|
||||
def validate_range(self, field_name):
|
||||
from apscheduler.triggers.cron.fields import MAX_VALUES, MIN_VALUES
|
||||
|
||||
value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name]
|
||||
if self.step and self.step > value_range:
|
||||
raise ValueError(
|
||||
f"the step value ({self.step}) is higher than the total range of the "
|
||||
f"expression ({value_range})"
|
||||
)
|
||||
|
||||
def get_next_value(self, date, field):
|
||||
start = field.get_value(date)
|
||||
minval = field.get_min(date)
|
||||
maxval = field.get_max(date)
|
||||
start = max(start, minval)
|
||||
|
||||
if not self.step:
|
||||
next = start
|
||||
else:
|
||||
distance_to_next = (self.step - (start - minval)) % self.step
|
||||
next = start + distance_to_next
|
||||
|
||||
if next <= maxval:
|
||||
return next
|
||||
|
||||
def __eq__(self, other):
|
||||
return isinstance(other, self.__class__) and self.step == other.step
|
||||
|
||||
def __str__(self):
|
||||
if self.step:
|
||||
return f"*/{self.step}"
|
||||
return "*"
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}({self.step})"
|
||||
|
||||
|
||||
class RangeExpression(AllExpression):
|
||||
value_re = re.compile(r"(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$")
|
||||
|
||||
def __init__(self, first, last=None, step=None):
|
||||
super().__init__(step)
|
||||
first = asint(first)
|
||||
last = asint(last)
|
||||
if last is None and step is None:
|
||||
last = first
|
||||
if last is not None and first > last:
|
||||
raise ValueError(
|
||||
"The minimum value in a range must not be higher than the maximum"
|
||||
)
|
||||
self.first = first
|
||||
self.last = last
|
||||
|
||||
def validate_range(self, field_name):
|
||||
from apscheduler.triggers.cron.fields import MAX_VALUES, MIN_VALUES
|
||||
|
||||
super().validate_range(field_name)
|
||||
if self.first < MIN_VALUES[field_name]:
|
||||
raise ValueError(
|
||||
f"the first value ({self.first}) is lower than the minimum value ({MIN_VALUES[field_name]})"
|
||||
)
|
||||
if self.last is not None and self.last > MAX_VALUES[field_name]:
|
||||
raise ValueError(
|
||||
f"the last value ({self.last}) is higher than the maximum value ({MAX_VALUES[field_name]})"
|
||||
)
|
||||
value_range = (self.last or MAX_VALUES[field_name]) - self.first
|
||||
if self.step and self.step > value_range:
|
||||
raise ValueError(
|
||||
f"the step value ({self.step}) is higher than the total range of the "
|
||||
f"expression ({value_range})"
|
||||
)
|
||||
|
||||
def get_next_value(self, date, field):
|
||||
startval = field.get_value(date)
|
||||
minval = field.get_min(date)
|
||||
maxval = field.get_max(date)
|
||||
|
||||
# Apply range limits
|
||||
minval = max(minval, self.first)
|
||||
maxval = min(maxval, self.last) if self.last is not None else maxval
|
||||
nextval = max(minval, startval)
|
||||
|
||||
# Apply the step if defined
|
||||
if self.step:
|
||||
distance_to_next = (self.step - (nextval - minval)) % self.step
|
||||
nextval += distance_to_next
|
||||
|
||||
return nextval if nextval <= maxval else None
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
isinstance(other, self.__class__)
|
||||
and self.first == other.first
|
||||
and self.last == other.last
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
if self.last != self.first and self.last is not None:
|
||||
range = f"{self.first}-{self.last}"
|
||||
else:
|
||||
range = str(self.first)
|
||||
|
||||
if self.step:
|
||||
return f"{range}/{self.step}"
|
||||
|
||||
return range
|
||||
|
||||
def __repr__(self):
|
||||
args = [str(self.first)]
|
||||
if (self.last != self.first and self.last is not None) or self.step:
|
||||
args.append(str(self.last))
|
||||
|
||||
if self.step:
|
||||
args.append(str(self.step))
|
||||
|
||||
return "{}({})".format(self.__class__.__name__, ", ".join(args))
|
||||
|
||||
|
||||
class MonthRangeExpression(RangeExpression):
|
||||
value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)
|
||||
|
||||
def __init__(self, first, last=None):
|
||||
try:
|
||||
first_num = MONTHS.index(first.lower()) + 1
|
||||
except ValueError:
|
||||
raise ValueError(f'Invalid month name "{first}"')
|
||||
|
||||
if last:
|
||||
try:
|
||||
last_num = MONTHS.index(last.lower()) + 1
|
||||
except ValueError:
|
||||
raise ValueError(f'Invalid month name "{last}"')
|
||||
else:
|
||||
last_num = None
|
||||
|
||||
super().__init__(first_num, last_num)
|
||||
|
||||
def __str__(self):
|
||||
if self.last != self.first and self.last is not None:
|
||||
return f"{MONTHS[self.first - 1]}-{MONTHS[self.last - 1]}"
|
||||
return MONTHS[self.first - 1]
|
||||
|
||||
    def __repr__(self):
        args = [f"'{MONTHS[self.first - 1]}'"]
        if self.last != self.first and self.last is not None:
            args.append(f"'{MONTHS[self.last - 1]}'")
        return "{}({})".format(self.__class__.__name__, ", ".join(args))


class WeekdayRangeExpression(RangeExpression):
    value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)

    def __init__(self, first, last=None):
        try:
            first_num = WEEKDAYS.index(first.lower())
        except ValueError:
            raise ValueError(f'Invalid weekday name "{first}"')

        if last:
            try:
                last_num = WEEKDAYS.index(last.lower())
            except ValueError:
                raise ValueError(f'Invalid weekday name "{last}"')
        else:
            last_num = None

        super().__init__(first_num, last_num)

    def __str__(self):
        if self.last != self.first and self.last is not None:
            return f"{WEEKDAYS[self.first]}-{WEEKDAYS[self.last]}"
        return WEEKDAYS[self.first]

    def __repr__(self):
        args = [f"'{WEEKDAYS[self.first]}'"]
        if self.last != self.first and self.last is not None:
            args.append(f"'{WEEKDAYS[self.last]}'")
        return "{}({})".format(self.__class__.__name__, ", ".join(args))


class WeekdayPositionExpression(AllExpression):
    options = ["1st", "2nd", "3rd", "4th", "5th", "last"]
    value_re = re.compile(
        r"(?P<option_name>{}) +(?P<weekday_name>(?:\d+|\w+))".format("|".join(options)),
        re.IGNORECASE,
    )

    def __init__(self, option_name, weekday_name):
        super().__init__(None)
        try:
            self.option_num = self.options.index(option_name.lower())
        except ValueError:
            raise ValueError(f'Invalid weekday position "{option_name}"')

        try:
            self.weekday = WEEKDAYS.index(weekday_name.lower())
        except ValueError:
            raise ValueError(f'Invalid weekday name "{weekday_name}"')

    def get_next_value(self, date, field):
        # Figure out the weekday of the month's first day and the number of days in that month
        first_day_wday, last_day = monthrange(date.year, date.month)

        # Calculate which day of the month is the first of the target weekdays
        first_hit_day = self.weekday - first_day_wday + 1
        if first_hit_day <= 0:
            first_hit_day += 7

        # Calculate what day of the month the target weekday would be
        if self.option_num < 5:
            target_day = first_hit_day + self.option_num * 7
        else:
            target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7

        if target_day <= last_day and target_day >= date.day:
            return target_day

    def __eq__(self, other):
        return (
            super().__eq__(other)
            and self.option_num == other.option_num
            and self.weekday == other.weekday
        )

    def __str__(self):
        return f"{self.options[self.option_num]} {WEEKDAYS[self.weekday]}"

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.options[self.option_num]}', '{WEEKDAYS[self.weekday]}')"


class LastDayOfMonthExpression(AllExpression):
    value_re = re.compile(r"last", re.IGNORECASE)

    def __init__(self):
        super().__init__(None)

    def get_next_value(self, date, field):
        return monthrange(date.year, date.month)[1]

    def __str__(self):
        return "last"

    def __repr__(self):
        return f"{self.__class__.__name__}()"
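
A quick sketch of using one of these expression classes directly (they are normally instantiated by the fields module that follows; the input string is illustrative):

    from apscheduler.triggers.cron.expressions import RangeExpression

    match = RangeExpression.value_re.match("0-30/5")
    expr = RangeExpression(**match.groupdict())
    print(expr)        # 0-30/5  -- fires at 0, 5, 10, ... 30
    print(repr(expr))  # RangeExpression(0, 30, 5)
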
@ -0,0 +1,149 @@
"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields."""

__all__ = (
    "DEFAULT_VALUES",
    "MAX_VALUES",
    "MIN_VALUES",
    "BaseField",
    "DayOfMonthField",
    "DayOfWeekField",
    "WeekField",
)

import re
from calendar import monthrange

from apscheduler.triggers.cron.expressions import (
    AllExpression,
    LastDayOfMonthExpression,
    MonthRangeExpression,
    RangeExpression,
    WeekdayPositionExpression,
    WeekdayRangeExpression,
)

MIN_VALUES = {
    "year": 1970,
    "month": 1,
    "day": 1,
    "week": 1,
    "day_of_week": 0,
    "hour": 0,
    "minute": 0,
    "second": 0,
}
MAX_VALUES = {
    "year": 9999,
    "month": 12,
    "day": 31,
    "week": 53,
    "day_of_week": 6,
    "hour": 23,
    "minute": 59,
    "second": 59,
}
DEFAULT_VALUES = {
    "year": "*",
    "month": 1,
    "day": 1,
    "week": "*",
    "day_of_week": "*",
    "hour": 0,
    "minute": 0,
    "second": 0,
}
SEPARATOR = re.compile(" *, *")


class BaseField:
    REAL = True
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        smallest = None
        for expr in self.expressions:
            value = expr.get_next_value(dateval, self)
            if smallest is None or (value is not None and value < smallest):
                smallest = value

        return smallest

    def compile_expressions(self, exprs):
        self.expressions = []

        # Split a comma-separated expression list, if any
        for expr in SEPARATOR.split(str(exprs).strip()):
            self.compile_expression(expr)

    def compile_expression(self, expr):
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                compiled_expr = compiler(**match.groupdict())

                try:
                    compiled_expr.validate_range(self.name)
                except ValueError as e:
                    raise ValueError(
                        f"Error validating expression {expr!r}: {e}"
                    ) from None

                self.expressions.append(compiled_expr)
                return

        raise ValueError(f'Unrecognized expression "{expr}" for field "{self.name}"')
    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and self.expressions == other.expressions
        )

    def __str__(self):
        expr_strings = (str(e) for e in self.expressions)
        return ",".join(expr_strings)

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.name}', '{self}')"


class WeekField(BaseField):
    REAL = False

    def get_value(self, dateval):
        return dateval.isocalendar()[1]


class DayOfMonthField(BaseField):
    COMPILERS = BaseField.COMPILERS + [
        WeekdayPositionExpression,
        LastDayOfMonthExpression,
    ]

    def get_max(self, dateval):
        return monthrange(dateval.year, dateval.month)[1]


class DayOfWeekField(BaseField):
    REAL = False
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]

    def get_value(self, dateval):
        return dateval.weekday()


class MonthField(BaseField):
    COMPILERS = BaseField.COMPILERS + [MonthRangeExpression]
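
A minimal sketch of how a field compiles a comma-separated expression list (hypothetical REPL use; fields are normally constructed inside CronTrigger):

    from apscheduler.triggers.cron.fields import BaseField

    # "*/15" compiles to an AllExpression, "50" to a RangeExpression
    field = BaseField("minute", "*/15,50")
    print(field)        # */15,50
    print(repr(field))  # BaseField('minute', '*/15,50')
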
@ -0,0 +1,51 @@
from datetime import datetime

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import astimezone, convert_to_datetime, datetime_repr


class DateTrigger(BaseTrigger):
    """
    Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.

    :param datetime|str run_date: the date/time to run the job at
    :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
    """

    __slots__ = "run_date"

    def __init__(self, run_date=None, timezone=None):
        timezone = astimezone(timezone) or get_localzone()
        if run_date is not None:
            self.run_date = convert_to_datetime(run_date, timezone, "run_date")
        else:
            self.run_date = datetime.now(timezone)

    def get_next_fire_time(self, previous_fire_time, now):
        return self.run_date if previous_fire_time is None else None

    def __getstate__(self):
        return {"version": 1, "run_date": self.run_date}

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get("version", 1) > 1:
            raise ValueError(
                f"Got serialized data for version {state['version']} of "
                f"{self.__class__.__name__}, but only version 1 can be handled"
            )

        self.run_date = state["run_date"]

    def __str__(self):
        return f"date[{datetime_repr(self.run_date)}]"

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} (run_date='{datetime_repr(self.run_date)}')>"
        )
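
A usage sketch (dates are illustrative; the trigger reports its run date once, then None):

    from datetime import datetime, timezone
    from apscheduler.triggers.date import DateTrigger

    trigger = DateTrigger(run_date=datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc))
    now = datetime.now(timezone.utc)
    print(trigger.get_next_fire_time(None, now))              # 2025-01-01 12:00:00+00:00
    print(trigger.get_next_fire_time(trigger.run_date, now))  # None -- fires only once
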
@ -0,0 +1,138 @@
import random
from datetime import datetime, timedelta
from math import ceil

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
    astimezone,
    convert_to_datetime,
    datetime_repr,
)


class IntervalTrigger(BaseTrigger):
    """
    Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` +
    interval otherwise.

    :param int weeks: number of weeks to wait
    :param int days: number of days to wait
    :param int hours: number of hours to wait
    :param int minutes: number of minutes to wait
    :param int seconds: number of seconds to wait
    :param datetime|str start_date: starting point for the interval calculation
    :param datetime|str end_date: latest possible date/time to trigger on
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
    :param int|None jitter: delay the job execution by ``jitter`` seconds at most
    """

    __slots__ = (
        "end_date",
        "interval",
        "interval_length",
        "jitter",
        "start_date",
        "timezone",
    )

    def __init__(
        self,
        weeks=0,
        days=0,
        hours=0,
        minutes=0,
        seconds=0,
        start_date=None,
        end_date=None,
        timezone=None,
        jitter=None,
    ):
        self.interval = timedelta(
            weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
        )
        self.interval_length = self.interval.total_seconds()
        if self.interval_length == 0:
            self.interval = timedelta(seconds=1)
            self.interval_length = 1

        if timezone:
            self.timezone = astimezone(timezone)
        elif isinstance(start_date, datetime) and start_date.tzinfo:
            self.timezone = astimezone(start_date.tzinfo)
        elif isinstance(end_date, datetime) and end_date.tzinfo:
            self.timezone = astimezone(end_date.tzinfo)
        else:
            self.timezone = get_localzone()

        start_date = start_date or (datetime.now(self.timezone) + self.interval)
        self.start_date = convert_to_datetime(start_date, self.timezone, "start_date")
        self.end_date = convert_to_datetime(end_date, self.timezone, "end_date")

        self.jitter = jitter

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            next_fire_time = previous_fire_time.timestamp() + self.interval_length
        elif self.start_date > now:
            next_fire_time = self.start_date.timestamp()
        else:
            timediff = now.timestamp() - self.start_date.timestamp()
            next_interval_num = ceil(timediff / self.interval_length)
            next_fire_time = (
                self.start_date.timestamp() + self.interval_length * next_interval_num
            )

        if self.jitter is not None:
            next_fire_time += random.uniform(0, self.jitter)

        if not self.end_date or next_fire_time <= self.end_date.timestamp():
            return datetime.fromtimestamp(next_fire_time, tz=self.timezone)

    def __getstate__(self):
        return {
            "version": 2,
            "timezone": astimezone(self.timezone),
            "start_date": self.start_date,
            "end_date": self.end_date,
            "interval": self.interval,
            "jitter": self.jitter,
        }

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get("version", 1) > 2:
            raise ValueError(
                f"Got serialized data for version {state['version']} of "
                f"{self.__class__.__name__}, but only versions up to 2 can be handled"
            )

        self.timezone = state["timezone"]
        self.start_date = state["start_date"]
        self.end_date = state["end_date"]
        self.interval = state["interval"]
        self.interval_length = self.interval.total_seconds()
        self.jitter = state.get("jitter")

    def __str__(self):
        return f"interval[{self.interval!s}]"

    def __repr__(self):
        options = [
            f"interval={self.interval!r}",
            f"start_date={datetime_repr(self.start_date)!r}",
        ]
        if self.end_date:
            options.append(f"end_date={datetime_repr(self.end_date)!r}")
        if self.jitter:
            options.append(f"jitter={self.jitter}")

        return "<{} ({}, timezone='{}')>".format(
            self.__class__.__name__,
            ", ".join(options),
            self.timezone,
        )
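
A sketch of the interval arithmetic above (illustrative times; timestamps are used so the spacing stays fixed across DST transitions):

    from datetime import datetime, timezone
    from apscheduler.triggers.interval import IntervalTrigger

    trigger = IntervalTrigger(
        minutes=30, start_date=datetime(2025, 1, 1, 0, 0, tzinfo=timezone.utc)
    )
    # 45 minutes past start_date -> the next 30-minute boundary is 01:00
    now = datetime(2025, 1, 1, 0, 45, tzinfo=timezone.utc)
    print(trigger.get_next_fire_time(None, now))  # 2025-01-01 01:00:00+00:00
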
485
venv/lib/python3.11/site-packages/apscheduler/util.py
Normal file
@ -0,0 +1,485 @@
"""This module contains several handy functions primarily meant for internal use."""

__all__ = (
    "asbool",
    "asint",
    "astimezone",
    "check_callable_args",
    "convert_to_datetime",
    "datetime_ceil",
    "datetime_to_utc_timestamp",
    "get_callable_name",
    "localize",
    "maybe_ref",
    "normalize",
    "obj_to_ref",
    "ref_to_obj",
    "undefined",
    "utc_timestamp_to_datetime",
)

import re
import sys
from calendar import timegm
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import partial
from inspect import isbuiltin, isclass, isfunction, ismethod, signature

if sys.version_info < (3, 14):
    from asyncio import iscoroutinefunction
else:
    from inspect import iscoroutinefunction

if sys.version_info < (3, 9):
    from backports.zoneinfo import ZoneInfo
else:
    from zoneinfo import ZoneInfo

UTC = timezone.utc


class _Undefined:
    def __nonzero__(self):
        return False

    def __bool__(self):
        return False

    def __repr__(self):
        return "<undefined>"


undefined = (
    _Undefined()
)  #: a unique object that only signifies that no value is defined


def asint(text):
    """
    Safely converts a string to an integer, returning ``None`` if the string is ``None``.

    :type text: str
    :rtype: int

    """
    if text is not None:
        return int(text)


def asbool(obj):
    """
    Interprets an object as a boolean value.

    :rtype: bool

    """
    if isinstance(obj, str):
        obj = obj.strip().lower()
        if obj in ("true", "yes", "on", "y", "t", "1"):
            return True

        if obj in ("false", "no", "off", "n", "f", "0"):
            return False

        raise ValueError(f'Unable to interpret value "{obj}" as boolean')

    return bool(obj)


def astimezone(obj):
    """
    Interprets an object as a timezone.

    :rtype: tzinfo

    """
    if isinstance(obj, str):
        if obj == "UTC":
            return timezone.utc

        return ZoneInfo(obj)

    if isinstance(obj, tzinfo):
        if obj.tzname(None) == "local":
            raise ValueError(
                "Unable to determine the name of the local timezone -- you must "
                "explicitly specify the name of the local timezone. Please refrain "
                "from using timezones like EST to prevent problems with daylight "
                "saving time. Instead, use a locale based timezone name (such as "
                "Europe/Helsinki)."
            )
        elif isinstance(obj, ZoneInfo):
            return obj
        elif hasattr(obj, "zone"):
            # pytz timezones
            if obj.zone:
                return ZoneInfo(obj.zone)

            return timezone(obj._offset)

        return obj

    if obj is not None:
        raise TypeError(f"Expected tzinfo, got {obj.__class__.__name__} instead")
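

# Accepted inputs for astimezone, sketched (illustrative; IANA names require
# tzdata to be available on the host):
#
#   >>> astimezone("UTC")
#   datetime.timezone.utc
#   >>> astimezone("Europe/Helsinki")
#   zoneinfo.ZoneInfo(key='Europe/Helsinki')
#   >>> astimezone(None) is None
#   True

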
def asdate(obj):
    if isinstance(obj, str):
        return date.fromisoformat(obj)

    return obj


_DATE_REGEX = re.compile(
    r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
    r"(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})"
    r"(?:\.(?P<microsecond>\d{1,6}))?"
    r"(?P<timezone>Z|[+-]\d\d:\d\d)?)?$"
)
def convert_to_datetime(input, tz, arg_name):
    """
    Converts the given object to a timezone aware datetime object.

    If a timezone aware datetime object is passed, it is returned unmodified.
    If a naive datetime object is passed, it is given the specified timezone.
    If the input is a string, it is parsed as a datetime with the given timezone.

    Date strings are accepted in three different forms: date only (Y-m-d), date with
    time (Y-m-d H:M:S) or date+time with microseconds (Y-m-d H:M:S.micro).
    Additionally, you can override the time zone by giving a specific offset in the
    format specified by ISO 8601: Z (UTC), +HH:MM or -HH:MM.

    :param str|datetime input: the datetime or string to convert to a timezone aware
        datetime
    :param datetime.tzinfo tz: timezone to interpret ``input`` in
    :param str arg_name: the name of the argument (used in an error message)
    :rtype: datetime

    """
    if input is None:
        return
    elif isinstance(input, datetime):
        datetime_ = input
    elif isinstance(input, date):
        datetime_ = datetime.combine(input, time())
    elif isinstance(input, str):
        m = _DATE_REGEX.match(input)
        if not m:
            raise ValueError("Invalid date string")

        values = m.groupdict()
        tzname = values.pop("timezone")
        if tzname == "Z":
            tz = timezone.utc
        elif tzname:
            hours, minutes = (int(x) for x in tzname[1:].split(":"))
            sign = 1 if tzname[0] == "+" else -1
            tz = timezone(sign * timedelta(hours=hours, minutes=minutes))

        values = {k: int(v or 0) for k, v in values.items()}
        datetime_ = datetime(**values)
    else:
        raise TypeError(f"Unsupported type for {arg_name}: {input.__class__.__name__}")

    if datetime_.tzinfo is not None:
        return datetime_
    if tz is None:
        raise ValueError(
            f'The "tz" argument must be specified if {arg_name} has no timezone information'
        )
    if isinstance(tz, str):
        tz = astimezone(tz)

    return localize(datetime_, tz)
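

# Sketch of the accepted string forms (illustrative values):
#
#   >>> convert_to_datetime("2025-01-01", timezone.utc, "run_date")
#   datetime.datetime(2025, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
#   >>> convert_to_datetime("2025-01-01 12:00:00+02:00", None, "run_date")
#   datetime.datetime(2025, 1, 1, 12, 0, tzinfo=datetime.timezone(datetime.timedelta(seconds=7200)))

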
def datetime_to_utc_timestamp(timeval):
    """
    Converts a datetime instance to a timestamp.

    :type timeval: datetime
    :rtype: float

    """
    if timeval is not None:
        return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000


def utc_timestamp_to_datetime(timestamp):
    """
    Converts the given timestamp to a datetime instance.

    :type timestamp: float
    :rtype: datetime

    """
    if timestamp is not None:
        return datetime.fromtimestamp(timestamp, timezone.utc)


def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float

    """
    return delta.days * 24 * 60 * 60 + delta.seconds + delta.microseconds / 1000000.0


def datetime_ceil(dateval):
    """
    Rounds the given datetime object upwards.

    :type dateval: datetime

    """
    if dateval.microsecond > 0:
        return datetime_utc_add(
            dateval, timedelta(seconds=1, microseconds=-dateval.microsecond)
        )

    return dateval
def datetime_utc_add(dateval: datetime, tdelta: timedelta) -> datetime:
    """
    Adds a timedelta to a datetime in UTC for correct datetime arithmetic across
    Daylight Saving Time changes.

    :param dateval: the datetime to add to
    :type dateval: datetime
    :param tdelta: the timedelta to add to the datetime
    :type tdelta: timedelta
    :return: the sum of the datetime and the timedelta
    :rtype: datetime
    """
    original_tz = dateval.tzinfo
    if original_tz is None:
        return dateval + tdelta

    return (dateval.astimezone(UTC) + tdelta).astimezone(original_tz)


def datetime_repr(dateval):
    return dateval.strftime("%Y-%m-%d %H:%M:%S %Z") if dateval else "None"


def timezone_repr(timezone: tzinfo) -> str:
    if isinstance(timezone, ZoneInfo):
        return timezone.key

    return repr(timezone)


def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable.

    :rtype: str

    """
    if ismethod(func):
        self = func.__self__
        cls = self if isclass(self) else type(self)
        return f"{cls.__qualname__}.{func.__name__}"
    elif isclass(func) or isfunction(func) or isbuiltin(func):
        return func.__qualname__
    elif hasattr(func, "__call__") and callable(func.__call__):
        # instance of a class with a __call__ method
        return type(func).__qualname__

    raise TypeError(
        f"Unable to determine a name for {func!r} -- maybe it is not a callable?"
    )


def obj_to_ref(obj):
    """
    Returns the path to the given callable.

    :rtype: str
    :raises TypeError: if the given object is not callable
    :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested
        function

    """
    if isinstance(obj, partial):
        raise ValueError("Cannot create a reference to a partial()")

    name = get_callable_name(obj)
    if "<lambda>" in name:
        raise ValueError("Cannot create a reference to a lambda")
    if "<locals>" in name:
        raise ValueError("Cannot create a reference to a nested function")

    if ismethod(obj):
        module = obj.__self__.__module__
    else:
        module = obj.__module__

    return f"{module}:{name}"
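

# Round-trip sketch for obj_to_ref/ref_to_obj (illustrative; any importable
# module-level callable works):
#
#   >>> from logging import getLogger
#   >>> obj_to_ref(getLogger)
#   'logging:getLogger'
#   >>> ref_to_obj("logging:getLogger") is getLogger
#   True

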
def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref``.

    :type ref: str

    """
    if not isinstance(ref, str):
        raise TypeError("References must be strings")
    if ":" not in ref:
        raise ValueError("Invalid reference")

    modulename, rest = ref.split(":", 1)
    try:
        obj = __import__(modulename, fromlist=[rest])
    except ImportError as exc:
        raise LookupError(
            f"Error resolving reference {ref}: could not import module"
        ) from exc

    try:
        for name in rest.split("."):
            obj = getattr(obj, name)
        return obj
    except Exception:
        raise LookupError(f"Error resolving reference {ref}: error looking up object")


def maybe_ref(ref):
    """
    Returns the object that the given reference points to, if it is indeed a reference.
    If it is not a reference, the object is returned as-is.

    """
    if not isinstance(ref, str):
        return ref
    return ref_to_obj(ref)


def check_callable_args(func, args, kwargs):
    """
    Ensures that the given callable can be called with the given arguments.

    :type args: tuple
    :type kwargs: dict

    """
    pos_kwargs_conflicts = []  # parameters that have a match in both args and kwargs
    positional_only_kwargs = []  # positional-only parameters that have a match in kwargs
    unsatisfied_args = []  # parameters in signature that don't have a match in args or kwargs
    unsatisfied_kwargs = []  # keyword-only arguments that don't have a match in kwargs
    unmatched_args = list(args)  # args that didn't match any of the parameters in the signature
    unmatched_kwargs = list(kwargs)  # kwargs that didn't match any of the parameters in the signature
    has_varargs = has_var_kwargs = False  # indicates if the signature defines *args and **kwargs respectively

    try:
        sig = signature(func, follow_wrapped=False)
    except ValueError:
        # signature() doesn't work against every kind of callable
        return

    for param in sig.parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD:
            if param.name in unmatched_kwargs and unmatched_args:
                pos_kwargs_conflicts.append(param.name)
            elif unmatched_args:
                del unmatched_args[0]
            elif param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
            elif param.default is param.empty:
                unsatisfied_args.append(param.name)
        elif param.kind == param.POSITIONAL_ONLY:
            if unmatched_args:
                del unmatched_args[0]
            elif param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
                positional_only_kwargs.append(param.name)
            elif param.default is param.empty:
                unsatisfied_args.append(param.name)
        elif param.kind == param.KEYWORD_ONLY:
            if param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
            elif param.default is param.empty:
                unsatisfied_kwargs.append(param.name)
        elif param.kind == param.VAR_POSITIONAL:
            has_varargs = True
        elif param.kind == param.VAR_KEYWORD:
            has_var_kwargs = True

    # Make sure there are no conflicts between args and kwargs
    if pos_kwargs_conflicts:
        raise ValueError(
            "The following arguments are supplied in both args and kwargs: {}".format(
                ", ".join(pos_kwargs_conflicts)
            )
        )

    # Check if keyword arguments are being fed to positional-only parameters
    if positional_only_kwargs:
        raise ValueError(
            "The following arguments cannot be given as keyword arguments: {}".format(
                ", ".join(positional_only_kwargs)
            )
        )

    # Check that the number of positional arguments minus the number of matched kwargs
    # matches the argspec
    if unsatisfied_args:
        raise ValueError(
            "The following arguments have not been supplied: {}".format(
                ", ".join(unsatisfied_args)
            )
        )

    # Check that all keyword-only arguments have been supplied
    if unsatisfied_kwargs:
        raise ValueError(
            "The following keyword-only arguments have not been supplied in kwargs: "
            "{}".format(", ".join(unsatisfied_kwargs))
        )

    # Check that the callable can accept the given number of positional arguments
    if not has_varargs and unmatched_args:
        raise ValueError(
            f"The list of positional arguments is longer than the target callable can "
            f"handle (allowed: {len(args) - len(unmatched_args)}, given in args: "
            f"{len(args)})"
        )

    # Check that the callable can accept the given keyword arguments
    if not has_var_kwargs and unmatched_kwargs:
        raise ValueError(
            "The target callable does not accept the following keyword arguments: "
            "{}".format(", ".join(unmatched_kwargs))
        )


def iscoroutinefunction_partial(f):
    while isinstance(f, partial):
        f = f.func

    # The asyncio version of iscoroutinefunction includes testing for @coroutine
    # decorations vs. the inspect version which does not.
    return iscoroutinefunction(f)


def normalize(dt):
    return datetime.fromtimestamp(dt.timestamp(), dt.tzinfo)


def localize(dt, tzinfo):
    if hasattr(tzinfo, "localize"):
        return tzinfo.localize(dt)

    return normalize(dt.replace(tzinfo=tzinfo))
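
Closing the file out, a hedged sketch of check_callable_args rejecting an incomplete invocation (the job function and its arguments are made up for illustration):

    from apscheduler.util import check_callable_args

    def job(a, b, *, retries=3):
        pass

    check_callable_args(job, ("x",), {"retries": 1})
    # ValueError: The following arguments have not been supplied: b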