also add a callback for process start

This commit is contained in:
nanos 2023-03-21 11:56:34 +00:00
parent 586888c761
commit 79b69e7fdc
3 changed files with 14 additions and 3 deletions

View file

@@ -32,7 +32,7 @@ jobs:
          path: artifacts
      - name: Get Directory structure
        run: ls -lR
-     - run: python find_posts.py --lock-hours=0 --access-token=${{ secrets.ACCESS_TOKEN }} --server=${{ vars.MASTODON_SERVER }} --reply-interval-in-hours=${{ vars.REPLY_INTERVAL_IN_HOURS || 0 }} --home-timeline-length=${{ vars.HOME_TIMELINE_LENGTH || 0 }} --max-followings=${{ vars.MAX_FOLLOWINGS || 0 }} --user=${{ vars.USER }} --max-followers=${{ vars.MAX_FOLLOWERS || 0 }} --http-timeout=${{ vars.HTTP_TIMEOUT || 5 }} --max-follow-requests=${{ vars.MAX_FOLLOW_REQUESTS || 0 }} --finished-callback=${{ vars.FINISHED_CALLBACK }}
+     - run: python find_posts.py --lock-hours=0 --access-token=${{ secrets.ACCESS_TOKEN }} --server=${{ vars.MASTODON_SERVER }} --reply-interval-in-hours=${{ vars.REPLY_INTERVAL_IN_HOURS || 0 }} --home-timeline-length=${{ vars.HOME_TIMELINE_LENGTH || 0 }} --max-followings=${{ vars.MAX_FOLLOWINGS || 0 }} --user=${{ vars.USER }} --max-followers=${{ vars.MAX_FOLLOWERS || 0 }} --http-timeout=${{ vars.HTTP_TIMEOUT || 5 }} --max-follow-requests=${{ vars.MAX_FOLLOW_REQUESTS || 0 }} --finished-callback=${{ vars.FINISHED_CALLBACK }} --started-callback=${{ vars.STARTED_CALLBACK }}
      - name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:

View file

@@ -55,7 +55,8 @@ Keep in mind that [the schedule event can be delayed during periods of high load
If you want to, you can of course also run this script locally as a cron job:
1. To get started, clone this repository. (If you'd rather not clone the full repository, you can simply download the `find_posts.py` file, but don't forget to create a directory called `artifacts` in the same directory: The script expects this directory to be present, and stores information about posts it has already pushed into your instance in that directory, to avoid pushing the same posts over and over again.)
-2. Then simply run this script like so: `python find_posts.py --access-token=<TOKEN> --server=<SERVER>` etc. (run `python find_posts.py -h` to get a list of all options)
+2. Install requirements: `pip install -r requirements.txt`
+3. Then simply run this script like so: `python find_posts.py --access-token=<TOKEN> --server=<SERVER>` etc. (run `python find_posts.py -h` to get a list of all options)
When setting up your cronjob, we are using file based locking to avoid multiple overlapping executions of the script. The timeout period for the lock can be configured using `--lock-hours`.

View file

@@ -10,6 +10,7 @@ import sys
import requests
import time
import argparse
+import uuid

argparser=argparse.ArgumentParser()
@@ -24,6 +25,7 @@ argparser.add_argument('--max-follow-requests', required = False, type=int, defa
argparser.add_argument('--http-timeout', required = False, type=int, default=5, help="The timeout for any HTTP requests to your own, or other instances.")
argparser.add_argument('--lock-hours', required = False, type=int, default=24, help="The lock timeout in hours.")
argparser.add_argument('--finished-callback', required = False, default=None, help="Provide a callback url that will be pinged when processing is complete. You can use this for 'dead man switch' monitoring of your task")
+argparser.add_argument('--started-callback', required = False, default=None, help="Provide a callback url that will be pinged when processing is starting. You can use this for 'dead man switch' monitoring of your task")
def pull_context(
    server,
@@ -710,6 +712,14 @@ if __name__ == "__main__":
        log(f"Cannot read logfile age - aborting.")
        sys.exit(1)
+    runId = uuid.uuid4()
+    if(arguments.started_callback != None):
+        try:
+            get(f"{arguments.started_callback}?rid={runId}")
+        except Exception as ex:
+            log(f"Error getting callback url: {ex}")
    with open(LOCK_FILE, "w", encoding="utf-8") as f:
        f.write(f"{datetime.now()}")
@@ -762,7 +772,7 @@ if __name__ == "__main__":
    if(arguments.finished_callback != None):
        try:
-            get(arguments.finished_callback)
+            get(f"{arguments.finished_callback}?rid={runId}")
        except Exception as ex:
            log(f"Error getting callback url: {ex}")