6_Month_Visit using POST API

This commit is contained in:
2026-02-03 20:54:54 +01:00
parent 8478604103
commit 0e842eb0fb
12 changed files with 536 additions and 229700 deletions

View File

@@ -56,6 +56,8 @@ from eb_dashboard_constants import (
GDD_URL,
ERROR_MAX_RETRY,
WAIT_BEFORE_RETRY,
WAIT_BEFORE_NEW_BATCH_OF_RETRIES,
MAX_BATCHS_OF_RETRIES,
MAX_THREADS,
RC_ENDOBEST_PROTOCOL_ID,
RC_ENDOBEST_EXCLUDED_CENTERS,
@@ -73,7 +75,7 @@ from eb_dashboard_constants import (
API_RC_SEARCH_INCLUSIONS_ENDPOINT,
API_RC_GET_RECORD_BY_PATIENT_ENDPOINT,
API_RC_GET_SURVEYS_ENDPOINT,
API_RC_GET_VISIT_ENDPOINT,
API_RC_SEARCH_VISITS_ENDPOINT,
API_GDD_GET_REQUEST_BY_TUBE_ID_ENDPOINT
)
@@ -81,8 +83,10 @@ from eb_dashboard_constants import (
from eb_dashboard_utils import (
get_nested_value,
get_httpx_client,
clear_httpx_client,
get_thread_position,
get_config_path
get_config_path,
thread_local_storage
)
from eb_dashboard_quality_checks import (
backup_output_files,
@@ -116,6 +120,7 @@ _token_refresh_lock = threading.Lock()
_threads_list_lock = threading.Lock()
global_pbar = None
_global_pbar_lock = threading.Lock()
_user_interaction_lock = threading.Lock()
# Global variables (mutable, set at runtime - not constants)
inclusions_mapping_config = []
@@ -172,9 +177,11 @@ def new_token():
return
except httpx.RequestError as exc:
logging.warning(f"Refresh Token Error (Attempt {attempt + 1}) : {exc}")
clear_httpx_client()
except httpx.HTTPStatusError as exc:
logging.warning(
f"Refresh Token Error (Attempt {attempt + 1}) : {exc.response.status_code} for Url {exc.request.url}")
clear_httpx_client()
finally:
if attempt < ERROR_MAX_RETRY - 1:
sleep(WAIT_BEFORE_RETRY)
def api_call_with_retry(func):
    """Decorator adding batched retries with interactive escalation to an API call.

    Retry strategy:
      1. Up to ERROR_MAX_RETRY attempts per batch, sleeping WAIT_BEFORE_RETRY
         between attempts.
      2. Up to MAX_BATCHS_OF_RETRIES automatic batches, sleeping
         WAIT_BEFORE_NEW_BATCH_OF_RETRIES between batches.
      3. Once all automatic batches are exhausted, the user is asked (under
         _user_interaction_lock so concurrent threads do not interleave
         prompts) whether to retry another round of batches, ignore the call
         (return None), or abort the script (raises httpx.RequestError).

    On every failure the thread-local httpx client is discarded to avoid
    reusing a potentially corrupted connection pool, and a 401 response
    triggers a token refresh via new_token().

    Returns:
        The wrapped function's result, or None if the user chose to ignore
        a persistent failure.

    Raises:
        httpx.RequestError: if the user chooses to stop after persistent errors.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        func_name = func.__name__
        total_attempts = 0
        batch_count = 1
        while True:
            for attempt in range(ERROR_MAX_RETRY):
                total_attempts += 1
                try:
                    return func(*args, **kwargs)
                except (httpx.RequestError, httpx.HTTPStatusError) as exc:
                    logging.warning(f"Error in {func_name} (Attempt {total_attempts}): {exc}")
                    # Refresh the thread-local client if an error occurs
                    # to avoid potential pool corruption or stale connections
                    clear_httpx_client()
                    if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code == 401:
                        logging.info(f"Token expired for {func_name}. Refreshing token.")
                        new_token()
                    if attempt < ERROR_MAX_RETRY - 1:
                        sleep(WAIT_BEFORE_RETRY)
                    else:
                        # Max retries reached for this batch
                        if batch_count < MAX_BATCHS_OF_RETRIES:
                            logging.warning(f"Batch {batch_count}/{MAX_BATCHS_OF_RETRIES} failed for {func_name}. "
                                            f"Waiting {WAIT_BEFORE_NEW_BATCH_OF_RETRIES}s before automatic retry batch.")
                            batch_count += 1
                            sleep(WAIT_BEFORE_NEW_BATCH_OF_RETRIES)
                            break  # Exit for loop to restart batch in while True
                        else:
                            # All automatic batches exhausted, ask the user
                            with _user_interaction_lock:
                                console.print(f"\n[bold red]Persistent error in {func_name} after {batch_count} batches ({total_attempts} attempts).[/bold red]")
                                console.print(f"[red]Exception: {exc}[/red]")
                                choice = questionary.select(
                                    f"What would you like to do for {func_name}?",
                                    choices=[
                                        "Retry (try another batch of retries)",
                                        "Ignore (return None and continue)",
                                        "Stop script (critical error)"
                                    ]
                                ).ask()
                                if choice == "Retry (try another batch of retries)":
                                    logging.info(f"User chose to retry {func_name}. Restarting batch sequence.")
                                    batch_count = 1  # Reset batch counter for the next interactive round
                                    break  # Exit for loop to restart batch in while True
                                elif choice == "Ignore (return None and continue)":
                                    # Retrieve context if available (set by the caller in
                                    # thread-local storage for per-patient error reporting)
                                    ctx = getattr(thread_local_storage, "current_patient_context", {"id": "Unknown", "pseudo": "Unknown"})
                                    logging.warning(f"[IGNORE] User opted to skip {func_name} for Patient {ctx['id']} ({ctx['pseudo']}). Error: {exc}")
                                    return None
                                else:
                                    # Covers explicit "Stop script" and a cancelled prompt
                                    # (questionary .ask() returns None on Ctrl-C/ESC)
                                    logging.critical(f"User chose to stop script after persistent error in {func_name}.")
                                    raise httpx.RequestError(message=f"Persistent error in {func_name} (stopped by user)")
    return wrapper
@@ -446,23 +496,29 @@ def load_organizations_mapping_config():
def _find_questionnaire_by_id(qcm_dict, qcm_id):
"""Finds a questionnaire by ID (direct dictionary lookup)."""
if not isinstance(qcm_dict, dict):
return None
qcm_data = qcm_dict.get(qcm_id)
return qcm_data.get("answers") if qcm_data else None
def _find_questionnaire_by_name(qcm_dict, name):
    """Finds a questionnaire by name (sequential search, returns first match).

    Args:
        qcm_dict: Mapping of questionnaire id -> questionnaire data dicts.
        name: Value compared against each entry's ["questionnaire"]["name"].

    Returns:
        The "answers" entry of the first matching questionnaire, or None
        when qcm_dict is not a dict or no entry matches.
    """
    # Guard against None / non-dict input before iterating.
    if not isinstance(qcm_dict, dict):
        return None
    for qcm in qcm_dict.values():
        if get_nested_value(qcm, ["questionnaire", "name"]) == name:
            return qcm.get("answers")
    return None
def _find_questionnaire_by_category(qcm_dict, category):
    """Finds a questionnaire by category (sequential search, returns first match).

    Args:
        qcm_dict: Mapping of questionnaire id -> questionnaire data dicts.
        category: Value compared against each entry's
            ["questionnaire"]["category"].

    Returns:
        The "answers" entry of the first matching questionnaire, or None
        when qcm_dict is not a dict or no entry matches.
    """
    # Guard against None / non-dict input before iterating.
    if not isinstance(qcm_dict, dict):
        return None
    for qcm in qcm_dict.values():
        if get_nested_value(qcm, ["questionnaire", "category"]) == category:
            return qcm.get("answers")
    return None
@@ -857,15 +913,21 @@ def get_request_by_tube_id(tube_id):
@api_call_with_retry
def search_visit_by_pseudo_and_order(pseudo, order):
    """Searches for a visit by patient pseudo and visit order.

    Issues a POST to the visit-search endpoint with the patient pseudo as
    keyword and the visit order (e.g. 2 for the 6-month visit) as filter.

    Args:
        pseudo: Patient pseudonym used as the search keyword.
        order: Integer visit order to filter on.

    Returns:
        The first visit dict from the response's "data" list, or None when
        the response has no matching visit (or an unexpected shape).

    Raises:
        httpx.HTTPStatusError: on non-2xx responses (handled/retried by the
            api_call_with_retry decorator).
    """
    client = get_httpx_client()
    client.base_url = RC_URL
    response = client.post(API_RC_SEARCH_VISITS_ENDPOINT,
                           headers={"Authorization": f"Bearer {access_token}"},
                           json={"visitOrder": order, "keywords": pseudo},
                           timeout=API_TIMEOUT)
    response.raise_for_status()
    resp_json = response.json()
    # Defensive parsing: only trust a dict payload carrying a non-empty
    # "data" list; anything else is treated as "no visit found".
    if isinstance(resp_json, dict):
        data = resp_json.get("data")
        if isinstance(data, list) and len(data) > 0:
            return data[0]
    return None
@api_call_with_retry
@@ -880,8 +942,15 @@ def get_all_questionnaires_by_patient(patient_id, record_data):
}
# Extract blockedQcmVersions from record (same logic as get_questionnaire_answers)
all_blocked_versions = get_nested_value(record_data, path=["record", "protocol_inclusions", 0, "blockedQcmVersions"],
default=[])
if record_data is None:
all_blocked_versions = []
else:
all_blocked_versions = get_nested_value(record_data, path=["record", "protocol_inclusions", 0, "blockedQcmVersions"],
default=[])
# Ensure it's a list even if get_nested_value returns "$$$$ No Data"
if all_blocked_versions == "$$$$ No Data":
all_blocked_versions = []
if all_blocked_versions:
payload["blockedQcmVersions"] = all_blocked_versions
@@ -1055,49 +1124,45 @@ def _process_inclusion_data(inclusion, organization):
"""Processes a single inclusion record and returns a dictionary."""
organization_id = organization["id"]
patient_id = get_nested_value(inclusion, path=["id"])
pseudo = get_nested_value(inclusion, path=["pseudo"], default="Unknown")
# Set thread-local context for detailed error logging in decorators
thread_local_storage.current_patient_context = {"id": patient_id, "pseudo": pseudo}
# Initialize empty output structure
output_inclusion = {}
# --- Prepare all data sources ---
# Prepare inclusion_data: enrich inclusion with organization info
# 1. Launch Visit Search asynchronously (it's slow, ~5s)
visit_future = subtasks_thread_pool.submit(search_visit_by_pseudo_and_order, pseudo, 2)
# 2. Prepare inclusion_data: enrich inclusion with organization info
inclusion_data = dict(inclusion)
inclusion_data["organization_id"] = organization_id
inclusion_data["organization_name"] = organization["name"]
if "Center_Name" in organization:
inclusion_data["center_name"] = organization["Center_Name"]
# Prepare record_data
# 3. Prepare record_data (sequential as it's often needed for questionnaires)
record_data = get_record_by_patient_id(patient_id, organization_id)
# Get tube_id for request
# 4. Get tube_id for request and launch in parallel with questionnaires
tube_id = get_nested_value(record_data, path=["record", "clinicResearchData", 0, "requestMetaData", "tubeId"], default="undefined")
# Launch in parallel: request and questionnaires
request_future = subtasks_thread_pool.submit(get_request_by_tube_id, tube_id)
all_questionnaires = get_all_questionnaires_by_patient(patient_id, record_data)
# --- Fetch 6-month visit data if it exists ---
six_month_visit_id = None
six_month_visit_data = None
# Record -> protocol_inclusions --> 0 --> visits[] (order == 2)
protocol_inclusions = get_nested_value(record_data, ["record", "protocol_inclusions"], default=[])
if protocol_inclusions:
visits = protocol_inclusions[0].get("visits", [])
for v in visits:
if v.get("order") == 2:
six_month_visit_id = v.get("id")
break
if six_month_visit_id:
try:
six_month_visit_data = get_visit_by_id(six_month_visit_id)
except Exception as e:
logging.error(f"Error fetching 6-month visit {six_month_visit_id} for patient {patient_id}: {e}")
# --- Synchronize all asynchronous tasks ---
try:
request_data = request_future.result()
except Exception as e:
logging.error(f"Error fetching request data for patient {patient_id}: {e}")
request_data = None
# Wait for request to complete
request_data = request_future.result()
try:
six_month_visit_data = visit_future.result()
except Exception as e:
logging.error(f"Error searching 6-month visit for patient {pseudo}: {e}")
six_month_visit_data = None
# --- Process all fields from configuration ---
process_inclusions_mapping(output_inclusion, inclusion_data, record_data, request_data, all_questionnaires, six_month_visit_data)