feature: add historical-data cleanup to the task module

luoja 2025-04-13 23:07:31 +08:00
parent 8e0e78df23
commit 14bfd88cce
5 changed files with 54 additions and 3 deletions

View File

@@ -14,6 +14,7 @@ from database.sql_db.dao.dao_apscheduler import (
    delete_apscheduler_running,
    select_apscheduler_running_log,
    truncate_apscheduler_running,
    delete_expire_data,
)
from config.dashgo_conf import SqlDbConf
import paramiko
@@ -63,7 +64,7 @@ def run_script(
    run_cmd = RUN_CMD[script_type]
    if type == 'local':
        # on a Chinese-locale Windows host, local .bat output needs GBK decoding
        encoding='gbk' if suffix == '.bat' else 'utf-8'
        encoding = 'gbk' if suffix == '.bat' else 'utf-8'
        # create the temporary script file
        with tempfile.NamedTemporaryFile(
            delete=False,
@@ -244,6 +245,11 @@ def run_script(
        ssh.close()


def delete_expire_data_for_cron(day):
    delete_expire_data(day)


CLEAR_JOB_ID = 'sys_delete_expire_data_for_cron'
class SchedulerService(rpyc.Service):
    def exposed_add_job(self, func, *args, **kwargs):
        kwargs['kwargs'] = list(kwargs['kwargs'])
@@ -276,6 +282,8 @@ class SchedulerService(rpyc.Service):
        jobs = scheduler.get_jobs(jobstore)
        result = []
        for job in jobs:
            if job.id == CLEAR_JOB_ID:
                continue
            result.append(self.get_job_dict(job))
        return json.dumps(result, ensure_ascii=False)
@@ -321,6 +329,29 @@ if __name__ == '__main__':
    scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
    scheduler.start()
    protocol_config = {'allow_public_attrs': True}
    # register the history-data cleanup job, replacing any previously persisted copy
    try:
        scheduler.remove_job(CLEAR_JOB_ID)
        print('previous cleanup job removed')
    except Exception:
        pass
    scheduler.add_job(
        'app_apscheduler:delete_expire_data_for_cron',
        'cron',
        kwargs=[
            ('day', ApSchedulerConf.DATA_EXPIRE_DAY),
        ],
        year='*',
        week='*',
        second=0,
        minute=0,
        hour=1,
        day='*',
        month='*',
        day_of_week='*',
        id=CLEAR_JOB_ID,
    )
    print(f'cleanup job registered, retention days: {ApSchedulerConf.DATA_EXPIRE_DAY}')
    server = ThreadedServer(SchedulerService, hostname=ApSchedulerConf.HOST, port=ApSchedulerConf.PORT, protocol_config=protocol_config)
    try:
        server.start()
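
Pulled out of the diff, the registration pattern above is: drop any persisted copy of the maintenance job, re-add it with a daily cron trigger, and hide its id from the user-facing job list. Below is a minimal self-contained sketch of the same pattern, assuming APScheduler 3.x; the retention value and function body are illustrative, and kwargs is passed here as a plain dict (the standard APScheduler form) rather than the list of (name, value) pairs the commit uses to match its rpyc transport.

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import JobLookupError

CLEAR_JOB_ID = 'sys_delete_expire_data_for_cron'

def delete_expire_data_for_cron(day):
    # stand-in for the real cleanup call: delete_expire_data(day)
    print(f'purging records older than {day} days')

scheduler = BackgroundScheduler()
scheduler.start()

# drop any previously persisted copy of the maintenance job
try:
    scheduler.remove_job(CLEAR_JOB_ID)
except JobLookupError:
    pass

# run every day at 01:00:00; 90 is an illustrative retention window
scheduler.add_job(
    delete_expire_data_for_cron,
    'cron',
    kwargs={'day': 90},
    hour=1,
    minute=0,
    second=0,
    id=CLEAR_JOB_ID,
)

# hide the maintenance job from user-facing listings, as exposed_get_jobs does
visible_jobs = [job for job in scheduler.get_jobs() if job.id != CLEAR_JOB_ID]

Registering the job by the string reference 'app_apscheduler:delete_expire_data_for_cron', as the commit does, lets a persistent jobstore re-import the callable by module path instead of serializing the function object itself.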

View File

@@ -63,7 +63,8 @@ OAuth2AuthorizationCodeLength = 32
OAuth2TokenExpiresInMinutes = 120
[ApSchedulerConf]
host = 127.0.0.1
port = 8091
DATA_EXPIRE_DAY = 90
HOST = 127.0.0.1
PORT = 8091
[SqlCacheConf]

View File

@@ -78,6 +78,7 @@ class OAuth2Conf(metaclass=BaseMetaConf):
class ApSchedulerConf(metaclass=BaseMetaConf):
    DATA_EXPIRE_DAY: int
    HOST: str
    PORT: int
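
The INI section and the annotated class evolve together: [ApSchedulerConf] gains DATA_EXPIRE_DAY = 90 while the typed config class gains DATA_EXPIRE_DAY: int. The repo's BaseMetaConf is not shown in this commit; the sketch below only illustrates the general pattern such a metaclass could follow, coercing each annotated key from the INI section named after the class. The section contents mirror the commit; everything else is an assumption.

from configparser import ConfigParser

_parser = ConfigParser()
_parser.read_string("""
[ApSchedulerConf]
DATA_EXPIRE_DAY = 90
HOST = 127.0.0.1
PORT = 8091
""")

class BaseMetaConfSketch(type):
    # illustrative stand-in for BaseMetaConf, not the repo's implementation
    def __new__(mcls, name, bases, namespace):
        cls = super().__new__(mcls, name, bases, namespace)
        if name in _parser:
            for key, typ in namespace.get('__annotations__', {}).items():
                setattr(cls, key, typ(_parser[name][key]))
        return cls

class ApSchedulerConf(metaclass=BaseMetaConfSketch):
    DATA_EXPIRE_DAY: int
    HOST: str
    PORT: int

print(ApSchedulerConf.DATA_EXPIRE_DAY, ApSchedulerConf.HOST, ApSchedulerConf.PORT)  # 90 127.0.0.1 8091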

View File

@@ -28,6 +28,7 @@ def render_content(menu_access: MenuAccess, **kwargs):
                id='task-log-job-id-select',
                options=[{'label': job.job_id, 'value': job.job_id} for job in get_apscheduler_all_jobs()],
                style={'width': 700},
                listHeight=200,
                prefix=fac.AntdIcon(icon='bi-table'),
            ),
            fac.AntdButton('Query execution records', id='task-log-get-start-datetime-btn'),
@@ -38,6 +39,7 @@ def render_content(menu_access: MenuAccess, **kwargs):
            fac.AntdSelect(
                id='task-log-start-datetime-select',
                prefix=fac.AntdIcon(icon='md-query-builder'),
                listHeight=500,
                style={'width': 700},
            ),
            fac.AntdButton('Query execution logs', id='task-log-get-log-btn'),
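
The two view tweaks only pass listHeight to the existing fac.AntdSelect components, capping the dropdown panel at 200 px for the job selector and 500 px for the run-timestamp selector. A minimal standalone usage of the same property, assuming feffery-antd-components and a recent Dash are installed; the id and options here are placeholders.

import feffery_antd_components as fac
from dash import Dash, html

app = Dash(__name__)
app.layout = html.Div(
    fac.AntdSelect(
        id='demo-job-select',  # placeholder id
        options=[{'label': f'job-{i}', 'value': f'job-{i}'} for i in range(100)],
        listHeight=200,  # height of the dropdown panel in pixels
        style={'width': 700},
    )
)

if __name__ == '__main__':
    app.run(debug=True)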

View File

@@ -169,3 +169,19 @@ def insert_apscheduler_result(job_id, status, log, start_datetime, extract_names
    except Exception as e:
        logger.error(f'Unexpected error while inserting the task result: {e}')
        raise Exception('Failed to insert apscheduler result due to an unknown error') from e


def delete_expire_data(day):
    # delete rows in ApschedulerResults and ApschedulerExtractValue that fall outside the retention window
    try:
        database = db()
        with database.atomic():
            expire_time = datetime.now() - timedelta(days=day)
            ApschedulerResults.delete().where(ApschedulerResults.start_datetime < expire_time).execute()
            ApschedulerExtractValue.delete().where(ApschedulerExtractValue.start_datetime < expire_time).execute()
    except IntegrityError as e:
        logger.error(f'Database integrity error while deleting expired data: {e}')
        raise Exception('Failed to delete expired data due to integrity error') from e
    except Exception as e:
        logger.error(f'Unexpected error while deleting expired data: {e}')
        raise Exception('Failed to delete expired data due to an unknown error') from e
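
delete_expire_data is a straightforward peewee bulk delete: compute a cutoff from the retention window and remove everything older inside one transaction. Below is a self-contained sketch of the same pattern against an in-memory SQLite database; the model is a simplified stand-in, not the repo's actual ApschedulerResults definition.

from datetime import datetime, timedelta
from peewee import SqliteDatabase, Model, CharField, DateTimeField

demo_db = SqliteDatabase(':memory:')  # illustrative database

class ResultsDemo(Model):
    # simplified stand-in for ApschedulerResults
    job_id = CharField()
    start_datetime = DateTimeField()

    class Meta:
        database = demo_db

def delete_expire_data(day):
    # delete result rows older than `day` days; returns the number of rows removed
    expire_time = datetime.now() - timedelta(days=day)
    with demo_db.atomic():
        return ResultsDemo.delete().where(ResultsDemo.start_datetime < expire_time).execute()

demo_db.create_tables([ResultsDemo])
ResultsDemo.create(job_id='old-job', start_datetime=datetime.now() - timedelta(days=120))
ResultsDemo.create(job_id='fresh-job', start_datetime=datetime.now())
print(delete_expire_data(90))  # -> 1, only the 120-day-old row is removed

Run once a day through the scheduler's sys_delete_expire_data_for_cron job, this keeps ApschedulerResults and ApschedulerExtractValue bounded by DATA_EXPIRE_DAY.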