from time import time, sleep
from redis import Redis
from rq import Worker, Queue, get_current_job
import requests
import signal
from tenacity import retry, wait_exponential, stop_after_attempt
import atexit
import os
import json
import random
from logger import setup_logger
from broker import generate_dedupe_key
import hashlib
import uuid
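
# RQ worker that consumes Trello notification jobs from Redis, enforces a
# sliding-window rate limit, skips duplicate payloads via an MD5 hash of the
# job data, and forwards each job to a Google Apps Script webhook. Behaviour
# is configured through environment variables (REDIS_HOST, REDIS_QUEUE,
# MAX_JOBS_PER_MINUTE, GAS_WEBHOOK_URL, ...).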

# MD5 hash generation for job deduplication
def generate_md5_hash(job_data):
    # The hash must be deterministic for identical payloads, otherwise the
    # duplicate check in process_job can never match; no random identifiers
    # are mixed into the data before hashing.
    job_string = json.dumps(job_data, sort_keys=True).encode('utf-8')
    return hashlib.md5(job_string).hexdigest()

# Redis connection
redis = Redis(host=os.environ.get('REDIS_HOST', 'localhost'),
              port=int(os.environ.get('REDIS_PORT', 6379)),
              db=int(os.environ.get('REDIS_DB', 0)))
queue = Queue(os.environ.get('REDIS_QUEUE', 'trello-notifications'), connection=redis)

# Logger setup
logger = setup_logger('worker', log_file='worker.log')

# Rate limit config
MAX_JOBS_PER_MINUTE = int(os.environ.get('MAX_JOBS_PER_MINUTE', 20))
REDIS_KEY = os.environ.get('REDIS_KEY', 'job_count_last_minute')
TIMEOUT = int(os.environ.get('TIMEOUT', 600))
JOBS = []

# Google Apps Script webhook URL
GAS_WEBHOOK_URL = os.environ.get('GAS_WEBHOOK_URL', 'https://script.google.com/macros/s/{script-id}/exec')

# Graceful shutdown flag
graceful_shutdown = False
SID_FILE_PATH = f"{os.path.dirname(__file__)}/{os.environ.get('SID_FILE_PATH', 'sids.json')}"

# Read the most recent Apps Script deployment ID (SID). The SID file is
# expected to hold a JSON array of objects, newest last, each with a 'sid' field.
def get_latest_sid():
    if not os.path.exists(SID_FILE_PATH):
        return None
    with open(SID_FILE_PATH, 'r') as file:
        data = json.load(file)
        return data[-1]['sid'] if data else None

# Sliding-window rate limiter: allow a job only if fewer than
# MAX_JOBS_PER_MINUTE jobs were recorded in the last 60 seconds.
def can_process_job():
    current_time = int(time())
    redis.zremrangebyscore(REDIS_KEY, '-inf', current_time - 60)  # Drop entries outside the window
    job_count = redis.zcard(REDIS_KEY)  # Number of jobs processed in the last 60 seconds

    logger.info(f"JOB COUNT : {job_count} || MAX_JOBS_PER_MINUTE : {MAX_JOBS_PER_MINUTE}")

    if job_count < MAX_JOBS_PER_MINUTE:
        # Use a unique member so several jobs within the same second are all counted.
        redis.zadd(REDIS_KEY, {f"{current_time}-{uuid.uuid4()}": current_time})
        return True
    return False

# Send job data to Google Apps Script, retrying with exponential backoff
@retry(stop=stop_after_attempt(2), wait=wait_exponential(min=1, max=60))
def send_to_gas(job_data):
    sid = get_latest_sid()
    if not sid:
        logger.error("No SID available; cannot build the GAS webhook URL.")
        raise ValueError("Missing SID for GAS webhook URL")
    try:
        response = requests.post(GAS_WEBHOOK_URL.replace('{script-id}', sid), json=job_data, timeout=30)
        response.raise_for_status()
        logger.info("Successfully sent job data to Google Apps Script.")
    except requests.exceptions.RequestException as e:
        logger.error(f"Failed to send data to GAS: {e}")
        raise

# Handling graceful shutdown
def handle_shutdown(signum, frame):
    global graceful_shutdown
    graceful_shutdown = True
    logger.info("Gracefully shutting down the worker...")
    sleep(5)

# Register shutdown signal handler
signal.signal(signal.SIGINT, handle_shutdown)
signal.signal(signal.SIGTERM, handle_shutdown)
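
# Note: rq's Worker also installs its own SIGINT/SIGTERM handlers inside
# work(), which can override the handlers registered above while it runs.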

# Cleanup when exiting
def cleanup_on_exit():
    if graceful_shutdown:
        logger.info("Worker shutting down, cleaning resources...")

atexit.register(cleanup_on_exit)

# Process job handler
def process_job(job_data, callback=False):
    try:
        job = get_current_job()

        if callback:
            logger.info(f"CALLBACK :: Processing job id: {job.id} | worker id: {os.getpid()} | worker name: {job.worker_name}")

        logger.info(f"Processing job id: {job.id} | worker id: {os.getpid()} | worker name: {job.worker_name}")
        if can_process_job():
            job_hash = generate_md5_hash(job_data)
            logger.info(f"Hash: {job_hash}")
            if redis.get(f"job-hash-{job_hash}"):
                logger.info("Duplicate job, skipping.")
                return
            redis.setex(f"job-hash-{job_hash}", 3600, "processed")  # Remember the payload for one hour
            logger.info("Processing job...")
            send_to_gas(job_data)
        else:
            burst_wait_time = random.randint(1, 3)  # Seconds to wait before the burst retry
            time_left = 10 - (int(time()) % 10)  # Time until the next 10-second boundary
            logger.info(f"Rate limit reached, waiting {min(burst_wait_time, time_left)}s before retrying...")

            # More aggressive but rate-limited bursts
            sleep(min(burst_wait_time, time_left))  # Shorter of burst delay and boundary wait
            process_job(job_data, True)  # Reattempt after the wait
    except Exception as e:
        logger.error(f"Error: {e}")
        key = generate_dedupe_key(job_data)
        retries = job_data.get('retries', 0)
        if retries <= 3:
            job_data['retries'] = retries + 1
            sleep(random.randint(1, 3))  # Short backoff before retrying
            process_job(job_data, True)  # Reattempt
        else:
            logger.error(f"Retries exhausted for job {key}, dropping it.")


# Worker setup & loop
def main():
    worker = Worker([queue], connection=redis)
    while not graceful_shutdown:
        try:
            worker.work(burst=True)  # Activate burst mode for faster job processing
        finally:
            if graceful_shutdown:
                break
            logger.info('Nothing to process, taking a break for 3 seconds...')
            sleep(3)  # Sleep before checking for jobs again

# Start worker
if __name__ == '__main__':
    main()
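
# Example (sketch): a producer elsewhere could enqueue jobs for this worker.
# The payload keys shown ('board', 'card', 'action') are illustrative only;
# any JSON-serializable dict will do, and the module is assumed to be
# importable as `worker` with the default queue name.
#
#     from redis import Redis
#     from rq import Queue
#     from worker import process_job
#
#     q = Queue('trello-notifications', connection=Redis())
#     q.enqueue(process_job, {'board': 'Ops', 'card': 'Deploy', 'action': 'moved'})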
