""" Endobest Script Template Template for creating scripts to access Endobest clinical research platform data. FEATURES: - Multi-microservice authentication (IAM, RC, GDD) - Thread-safe HTTP client pool - Multithreading with main pool + subtasks pool - Automatic retry with token refresh on 401 - Progress bars and logging - Utilities for JSON navigation HOW TO USE: 1. Configure MICROSERVICES dict (comment unused services) 2. Implement your processing logic in main() function 3. Use API templates as examples for your own endpoints 4. Customize constants as needed (timeouts, threads, etc.) QUICK START: - Run script: python eb_script_template.py - Login with credentials (defaults provided) - Choose number of threads - Processing happens in main() TODO block For detailed documentation, see Script_template_spec.md """ import json import logging import os import sys import threading import traceback from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import timedelta from time import perf_counter, sleep import functools import httpx import questionary from tqdm import tqdm from rich.console import Console # ============================================================================ # CONFIGURATION - CREDENTIALS # ============================================================================ DEFAULT_USER_NAME = "ziwig-invest2@yopmail.com" DEFAULT_PASSWORD = "pbrrA765$bP3beiuyuiyhiuy!agx" REALME = "ziwig-pro" # ============================================================================ # CONFIGURATION - MICROSERVICES # ============================================================================ # Comment out unused microservices to skip their token configuration MICROSERVICES = { "IAM": { "app_id": None, # IAM doesn't use app_id "base_url": "https://api-auth.ziwig-connect.com", "endpoints": { "login": "/api/auth/{REALME}/login", # POST : Body = {"username": "{user_name}", "password": "{pass}"} "refresh": "/api/auth/refreshToken", # POST : Body = 
{"refresh_token": "{refresh_token}"} "get_roles": "/api/profiles/paginate", # POST : Body = {"limit": 100, "currentPage": 1, "sort": [], "filters": {}} "get_user_by_id": "/api/users/find/{user_id}?domaine={REALME}", # GET "get_applications": "/api/applications", # GET "get_profiles_by_app_id": "/api/identity-profiles/paginate", # POST : Body = {"page":null,"limit":100,"search":{},"clientId":"{app_id}","type":"user"} "get_users_by_profile_id": "/api/identity-profiles/{profile_id}/users", # GET } }, "RC": { "app_id": "602aea51-cdb2-4f73-ac99-fd84050dc393", "base_url": "https://api-hcp.ziwig-connect.com", "endpoints": { "config_token": "/api/auth/config-token", # POST : Body = {"userId": "{user_id}", "clientId": "{app_id}", "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"}} "refresh": "/api/auth/refreshToken", # POST : Body = {"refresh_token": "{refresh_token}"} "organizations": "/api/inclusions/getAllOrganizations", # GET "statistics": "/api/inclusions/inclusion-statistics", # POST : Body = {"center": "rc_endobest_current_center}}", "protocolId": "{rc_endobest_prot_id}", "excludedCenters": {rc_endobest_excl_centers}} "search_inclusions": "/api/inclusions/search?limit={limit}&page={page}", # POST : Body = {"protocolId": "3c7bcb4d-91ed-4e9f-b93f-99d8447a276e", "center": organization_id, "keywords": ""} "record_by_patient": "/api/records/byPatient", # POST : Body = {"center": "{rc_endobest_current_center}", "patientId": "{patient_id}", "mode": "exchange", "state": "ongoing", "includeEndoParcour": false, "sourceClient": "pro_prm"}, "surveys": "/api/surveys/filter/with-answers", #POST : Body = {"context": "clinic_research", "subject": "{patient_id}", "blockedQcmVersions": {blocked_qcm_versions}} } }, "GDD": { "app_id": "4f5ac063-6a22-4e2c-bda5-b50c0dddab79", "base_url": "https://api-lab.ziwig-connect.com", "endpoints": { "config_token": "/api/auth/config-token", # POST : Body = {"userId": 
"{user_id}", "clientId": "{app_id}", "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"}} "refresh": "/api/auth/refreshToken", # POST : Body = {"refresh_token": "{refresh_token}"} "request_by_tube": "/api/requests/by-tube-id/{tube_id}", # GET } }, "HRD": { "app_id": "93bc44fd-c64b-4fff-a450-f3cba956e934", "base_url": "https://api-resources.ziwig-connect.com", "endpoints": { "config_token": "/api/auth/config-token", # POST : Body = {"userId": "{user_id}", "clientId": "{app_id}", "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"}} "refresh": "/api/auth/refreshToken", # POST : Body = {"refresh_token": "{refresh_token}"} "pro_by_id": "api/entity-manager/meta/{model}/data/nodes/pro/{pro_id}?relationships=2", # GET "get_pros_by_endobest_center": "/api/entity-manager/meta/modele_fr/data/orga/{organization_id}/centers/pros?limit=1000", # GET } }, } # ============================================================================ # CONFIGURATION - THREADING # ============================================================================ MAX_THREADS = 20 # Maximum threads for main pool SUBTASKS_POOL_SIZE = 40 # Fixed size for subtasks pool # ============================================================================ # CONFIGURATION - RETRY & TIMEOUTS # ============================================================================ ERROR_MAX_RETRY = 10 # Max retry attempts for API calls WAIT_BEFORE_RETRY = 0.5 # Delay in seconds between retries (fixed) API_TIMEOUT = 60 # Default timeout for API calls (seconds) # ============================================================================ # CONFIGURATION - LOGGING # ============================================================================ LOG_LEVEL = logging.INFO # Change to DEBUG for detailed logs LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s' # LOG_FILE_NAME 
# ============================================================================
# CONFIGURATION - PROGRESS BARS
# ============================================================================
# Fixed-width fields keep stacked tqdm bars aligned across threads.
BAR_N_FMT_WIDTH = 4
BAR_TOTAL_FMT_WIDTH = 4
BAR_TIME_WIDTH = 8
BAR_RATE_WIDTH = 10
custom_bar_format = ("{l_bar}{bar}"
                     f" {{n_fmt:>{BAR_N_FMT_WIDTH}}}/{{total_fmt:<{BAR_TOTAL_FMT_WIDTH}}} "
                     f"[{{elapsed:<{BAR_TIME_WIDTH}}}<{{remaining:>{BAR_TIME_WIDTH}}}, "
                     f"{{rate_fmt:>{BAR_RATE_WIDTH}}}]{{postfix}}")

# ============================================================================
# GLOBAL VARIABLES
# ============================================================================
# Tokens storage: {app_name: {"access_token": ..., "refresh_token": ...}}
tokens = {}

# Thread-safe HTTP client pool (one client per thread, keyed by thread id)
httpx_clients = {}

# Thread management
threads_list = []
_threads_list_lock = threading.Lock()
_token_refresh_lock = threading.Lock()

# Thread pools (initialized in main())
main_thread_pool = None
subtasks_thread_pool = None

# Rich console for formatted output
console = Console()


# ============================================================================
# UTILITIES
# ============================================================================
def get_nested_value(data_structure, path, default=None):
    """
    Extract a value from nested dict/list structures with wildcard support.

    Args:
        data_structure: Nested dict/list to navigate.
        path: List of keys/indices. Use '*' for a list wildcard.
        default: Value to return if the path is not found.

    Returns:
        Value at path, or ``default`` if not found. When the path contains
        '*', a flat list of the matching values is returned instead.
        If ``data_structure`` itself is None, the sentinel string
        "$$$$ No Data" is returned (and filtered out of wildcard results).

    Examples:
        get_nested_value({"a": {"b": 1}}, ["a", "b"]) -> 1
        get_nested_value({"items": [{"x": 1}, {"x": 2}]}, ["items", "*", "x"]) -> [1, 2]
    """
    # Sentinel distinguishes "no input data" from a legitimately absent key.
    if data_structure is None:
        return "$$$$ No Data"
    if not path:
        return default

    # Handle wildcard in path: resolve the prefix, then recurse on each element.
    if "*" in path:
        wildcard_index = path.index("*")
        path_before = path[:wildcard_index]
        path_after = path[wildcard_index+1:]

        # Helper for non-wildcard path resolution (prefix before '*').
        def _get_simple_nested_value(ds, p, d):
            cl = ds
            for k in p:
                if isinstance(cl, dict):
                    cl = cl.get(k)
                elif isinstance(cl, list):
                    try:
                        # Accept negative indices within list bounds.
                        if isinstance(k, int) and -len(cl) <= k < len(cl):
                            cl = cl[k]
                        else:
                            return d
                    except (IndexError, TypeError):
                        return d
                else:
                    return d
                if cl is None:
                    return d
            return cl

        base_level = _get_simple_nested_value(data_structure, path_before, default)
        # The wildcard position must resolve to a list to fan out over.
        if not isinstance(base_level, list):
            return default

        results = []
        for item in base_level:
            value = get_nested_value(item, path_after, default)
            # Identity check against `default` (not equality) so falsy values
            # like 0 or "" are kept; the sentinel marks None sub-items.
            if value is not default and value != "$$$$ No Data":
                results.append(value)

        # Flatten one level so multiple wildcards yield a flat list.
        final_results = []
        for res in results:
            if isinstance(res, list):
                final_results.extend(res)
            else:
                final_results.append(res)
        return final_results

    # No wildcard - standard traversal.
    current_level = data_structure
    for key_or_index in path:
        if isinstance(current_level, dict):
            current_level = current_level.get(key_or_index)
            if current_level is None:
                return default
        elif isinstance(current_level, list):
            try:
                if isinstance(key_or_index, int) and -len(current_level) <= key_or_index < len(current_level):
                    current_level = current_level[key_or_index]
                else:
                    return default
            except (IndexError, TypeError):
                return default
        else:
            return default
    return current_level


def get_httpx_client() -> httpx.Client:
    """
    Get or create a thread-local HTTP client with keep-alive enabled.

    Each thread gets its own client to avoid connection conflicts.
    NOTE(review): clients are never closed explicitly; they live for the
    process lifetime — confirm this is acceptable for long-running scripts.

    Returns:
        httpx.Client instance for the current thread.
    """
    global httpx_clients
    thread_id = threading.get_ident()
    if thread_id not in httpx_clients:
        httpx_clients[thread_id] = httpx.Client(
            headers={"Connection": "keep-alive"},
            limits=httpx.Limits(max_keepalive_connections=20, max_connections=100)
        )
    return httpx_clients[thread_id]
def get_thread_position():
    """
    Get the position of the current thread in the threads list.

    Used for managing progress bar positions in a multithreaded environment:
    the first thread to call this gets position 0, the next 1, and so on.

    Returns:
        Zero-based index of the current thread.
    """
    global threads_list
    thread_id = threading.get_ident()
    with _threads_list_lock:
        if thread_id not in threads_list:
            threads_list.append(thread_id)
            return len(threads_list) - 1
        else:
            return threads_list.index(thread_id)


# ============================================================================
# AUTHENTICATION
# ============================================================================
def login():
    """
    Authenticate with IAM and configure tokens for all microservices.

    Process:
        1. Prompt for credentials (with defaults)
        2. Login to IAM -> get master_token and user_id
        3. For each microservice (except IAM): call config-token API
        4. Store access_token and refresh_token for each service

    Returns:
        "Success": Authentication succeeded for all services.
        "Error": Authentication failed (caller may retry).
        "Exit": User cancelled login (empty input).
    """
    global tokens

    # Prompt for credentials
    user_name = questionary.text("login:", default=DEFAULT_USER_NAME).ask()
    password = questionary.password("password:", default=DEFAULT_PASSWORD).ask()
    if not (user_name and password):
        return "Exit"

    # Step 1: Login to IAM.
    # NOTE: endpoint templates are expanded with globals()+locals(), which
    # resolves {REALME} from the module-level constant.
    try:
        client = get_httpx_client()
        client.base_url = MICROSERVICES["IAM"]["base_url"]
        response = client.post(
            MICROSERVICES["IAM"]["endpoints"]["login"].format(**{**globals(),**locals()}),
            json={"username": user_name, "password": password},
            timeout=20
        )
        response.raise_for_status()
        master_token = response.json()["access_token"]
        user_id = response.json()["userId"]
        tokens["IAM"] = {
            "access_token": master_token,
            "refresh_token": response.json()["refresh_token"]
        }
    except (httpx.RequestError, httpx.HTTPStatusError) as exc:
        print(f"Login Error: {exc}")
        logging.warning(f"Login Error: {exc}")
        return "Error"

    # Step 2: Configure tokens for each microservice.
    # The shared per-thread client's base_url is re-pointed per service;
    # safe here because the calls are sequential on one thread.
    for app_name, app_config in MICROSERVICES.items():
        if app_name == "IAM":
            continue  # IAM doesn't need config-token
        try:
            client = get_httpx_client()
            client.base_url = app_config["base_url"]
            response = client.post(
                app_config["endpoints"]["config_token"].format(**{**globals(),**locals()}),
                headers={"Authorization": f"Bearer {master_token}"},
                json={
                    "userId": user_id,
                    "clientId": app_config["app_id"],
                    "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"
                },
                timeout=20
            )
            response.raise_for_status()
            tokens[app_name] = {
                "access_token": response.json()["access_token"],
                "refresh_token": response.json()["refresh_token"]
            }
        except (httpx.RequestError, httpx.HTTPStatusError) as exc:
            print(f"Config-token Error for {app_name}: {exc}")
            logging.warning(f"Config-token Error for {app_name}: {exc}")
            return "Error"

    print("\nLogin Success")
    return "Success"


def new_token(app):
    """
    Refresh the access token for a specific microservice.

    Uses refresh_token to obtain a new access_token and refresh_token.
    Thread-safe with a lock to prevent concurrent refresh attempts.

    Args:
        app: Microservice name (e.g., "RC", "GDD").

    Raises:
        httpx.RequestError: If refresh fails after all retries.
    """
    global tokens
    with _token_refresh_lock:
        for attempt in range(ERROR_MAX_RETRY):
            try:
                client = get_httpx_client()
                client.base_url = MICROSERVICES[app]["base_url"]
                response = client.post(
                    MICROSERVICES[app]["endpoints"]["refresh"].format(**{**globals(),**locals()}),
                    headers={"Authorization": f"Bearer {tokens[app]['access_token']}"},
                    json={"refresh_token": tokens[app]["refresh_token"]},
                    timeout=20
                )
                response.raise_for_status()
                tokens[app]["access_token"] = response.json()["access_token"]
                tokens[app]["refresh_token"] = response.json()["refresh_token"]
                return
            except (httpx.RequestError, httpx.HTTPStatusError) as exc:
                logging.warning(f"Refresh Token Error for {app} (Attempt {attempt + 1}): {exc}")
                if attempt < ERROR_MAX_RETRY - 1:
                    sleep(WAIT_BEFORE_RETRY)
        logging.critical(f"Persistent error in refresh_token for {app}")
        raise httpx.RequestError(message=f"Persistent error in refresh_token for {app}")


# ============================================================================
# DECORATORS
# ============================================================================
def api_call_with_retry(app):
    """
    Decorator for API calls with automatic retry and token refresh on 401.

    Features:
        - Retries on network errors (httpx.RequestError)
        - Retries on HTTP errors (httpx.HTTPStatusError)
        - Automatically refreshes token on 401 Unauthorized
        - Configurable retry count and delay (module constants)

    Args:
        app: Microservice name for token refresh (e.g., "RC", "GDD").

    Usage:
        @api_call_with_retry("RC")
        def get_organizations():
            # API call implementation
            ...

    Raises:
        httpx.RequestError: If all retries are exhausted.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            func_name = func.__name__
            for attempt in range(ERROR_MAX_RETRY):
                try:
                    return func(*args, **kwargs)
                except (httpx.RequestError, httpx.HTTPStatusError) as exc:
                    logging.warning(f"Error in {func_name} (Attempt {attempt + 1}/{ERROR_MAX_RETRY}): {exc}")
                    # Auto-refresh token on 401, then retry the wrapped call.
                    if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code == 401:
                        logging.info(f"Token expired for {func_name}. Refreshing token for {app}.")
                        new_token(app)
                    if attempt < ERROR_MAX_RETRY - 1:
                        sleep(WAIT_BEFORE_RETRY)
            logging.critical(f"Persistent error in {func_name} after {ERROR_MAX_RETRY} attempts.")
            raise httpx.RequestError(message=f"Persistent error in {func_name}")
        return wrapper
    return decorator
# ============================================================================
# API TEMPLATES
# ============================================================================
# Templates for common API patterns. Duplicate and modify for your needs.
# Remember to:
# - Choose appropriate HTTP method (GET/POST/PUT/DELETE)
# - Update endpoint from MICROSERVICES dict
# - Adjust timeout if needed (global API_TIMEOUT or specific value)
# - Always return response.json()

@api_call_with_retry("RC")
def get_all_organizations():
    """
    Example API call using the GET method.

    Returns:
        List of organization dictionaries from the RC microservice.

    Raises:
        httpx.RequestError: If all retries are exhausted (via decorator).
    """
    client = get_httpx_client()
    # base_url contains no {placeholders}; assign it directly.
    client.base_url = MICROSERVICES["RC"]["base_url"]
    response = client.get(
        MICROSERVICES["RC"]["endpoints"]["organizations"],
        headers={"Authorization": f"Bearer {tokens['RC']['access_token']}"},
        timeout=API_TIMEOUT
    )
    response.raise_for_status()
    return response.json()


@api_call_with_retry("RC")
def search_inclusions(organization_id, limit, page):
    """
    Example API call using the POST method with query params and a JSON body.

    Args:
        organization_id: Organization UUID.
        limit: Max results per page.
        page: Page number (1-based).

    Returns:
        Dict with "data" key containing the list of inclusions.

    Raises:
        httpx.RequestError: If all retries are exhausted (via decorator).
    """
    client = get_httpx_client()
    client.base_url = MICROSERVICES["RC"]["base_url"]
    # BUG FIX: the endpoint template already ends with
    # "?limit={limit}&page={page}". The previous code appended a SECOND
    # query string to the unexpanded template, producing a malformed URL:
    #   /api/inclusions/search?limit={limit}&page={page}?limit=10&page=1
    # Expand the template exactly once instead.
    response = client.post(
        MICROSERVICES["RC"]["endpoints"]["search_inclusions"].format(limit=limit, page=page),
        headers={"Authorization": f"Bearer {tokens['RC']['access_token']}"},
        json={
            "protocolId": "3c7bcb4d-91ed-4e9f-b93f-99d8447a276e",  # TODO: Configure if needed
            "center": organization_id,
            "keywords": ""
        },
        timeout=API_TIMEOUT
    )
    response.raise_for_status()
    return response.json()


# ============================================================================
# MAIN PROCESSING
# ============================================================================
def main():
    """
    Main processing function.

    Structure:
        1. Authentication (retry on error, abort on user exit)
        2. Configuration (thread count prompt, capped at MAX_THREADS)
        3. Initialization (thread pools, timing)
        4. Main processing block (TODO: implement your logic here)
        5. Finalization (elapsed time)
    """
    global main_thread_pool, subtasks_thread_pool

    # ========== AUTHENTICATION ==========
    print()
    login_status = login()
    while login_status == "Error":
        login_status = login()
    if login_status == "Exit":
        return

    # ========== CONFIGURATION ==========
    print()
    number_of_threads = int(
        questionary.text(
            "Number of threads:",
            default="12",
            validate=lambda x: x.isdigit() and 0 < int(x) <= MAX_THREADS
        ).ask()
    )

    # ========== INITIALIZATION ==========
    start_time = perf_counter()

    # Initialize thread pools (shut down in the __main__ finally block).
    main_thread_pool = ThreadPoolExecutor(max_workers=number_of_threads)
    subtasks_thread_pool = ThreadPoolExecutor(max_workers=SUBTASKS_POOL_SIZE)

    # ========== MAIN PROCESSING BLOCK ==========
    print()
    console.print("[bold cyan]Starting main processing...[/bold cyan]")

    # TODO: IMPLEMENT YOUR PROCESSING LOGIC HERE
    #
    # Example pattern with progress bar and multithreading:
    #
    # items = [...]  # Your data to process
    #
    # with tqdm(total=len(items), desc="Processing items",
    #           bar_format=custom_bar_format) as pbar:
    #     with main_thread_pool as executor:
    #         futures = [executor.submit(process_item, item) for item in items]
    #
    #         for future in as_completed(futures):
    #             try:
    #                 result = future.result()
    #                 # Process result here
    #                 pbar.update(1)
    #             except Exception as exc:
    #                 logging.critical(f"Error in worker: {exc}", exc_info=True)
    #                 print(f"\nCRITICAL ERROR: {exc}")
    #                 executor.shutdown(wait=False, cancel_futures=True)
    #                 raise
    #
    # Example: Simple test to verify authentication works
    # organizations = get_all_organizations()
    # console.print(f"[green]Retrieved {len(organizations)} organizations[/green]")

    # ========== FINALIZATION ==========
    print()
    print(f"Elapsed time: {str(timedelta(seconds=perf_counter() - start_time))}")


# ============================================================================
# ENTRY POINT
# ============================================================================
if __name__ == '__main__':
    # ========== LOGGING CONFIGURATION ==========
    # Auto-generate log filename based on script name.
    script_name = os.path.splitext(os.path.basename(__file__))[0]
    log_file_name = f"{script_name}.log"
    logging.basicConfig(
        level=LOG_LEVEL,
        format=LOG_FORMAT,
        filename=log_file_name,
        filemode='w'
    )

    # ========== MAIN EXECUTION ==========
    try:
        main()
    except Exception as e:
        logging.critical(f"Script terminated with exception: {e}", exc_info=True)
        print(f"\nScript stopped due to error: {e}")
        print(traceback.format_exc())
    finally:
        # ========== CLEANUP ==========
        # Shutdown thread pools gracefully (they may be None if login was cancelled).
        if 'main_thread_pool' in globals() and main_thread_pool:
            main_thread_pool.shutdown(wait=False, cancel_futures=True)
        if 'subtasks_thread_pool' in globals() and subtasks_thread_pool:
            subtasks_thread_pool.shutdown(wait=False, cancel_futures=True)

        # Pause before exit (prevents console from closing immediately
        # when launched from Windows Explorer).
        print('\n')
        input("Press Enter to exit...")