Skip to content

Commit 6195b1d

Browse files
committed
refactor(ct,solr): streamline schema backup logic and signal handling #11959
- Moved cleanup function to improve organization and ensure proper signal handling setup.
- Refactored schema backup process by introducing a dedicated filename generation method. (No longer rely on capturing the filename from an echo in the method, which makes logging in it much harder.)
- Enhanced robustness by fixing issues with Solr authentication header encoding and improving metadata change logging.
- Addressed minor code consistency improvements and reduced redundancy.
1 parent eb4f762 commit 6195b1d

1 file changed

Lines changed: 32 additions & 24 deletions

File tree

conf/solr/solr-driver.sh

Lines changed: 32 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ set -euo pipefail
88
# This script has two modes: watching and one-shot.
99
#
1010
# In watching mode, it will:
11-
# 1. Watch changes for changes to the Dataverse Metadata Fields by polling the REST API
11+
# 1. Watch for changes to the Dataverse Metadata Fields by polling the REST API
1212
# 2. Download the field definitions and apply them using update-fields.sh
1313
# 3. Make sure there are actually changes between the current and the new schema.xml
1414
# 4. Create a backup copy of the live schema.xml before replacing it
@@ -64,7 +64,7 @@ DEFAULT_WAIT_RETRY_PERIOD="5"
6464
DEFAULT_WAIT_MAX_RETRIES="60"
6565
DEFAULT_UPGRADE_MODE="false"
6666
# Note: this is specific to the configbaker container use case. Override with -P!
67-
DEFAULT_UPGRADE_SOURCE_PATH="${SOLR_TEMPLATE}/conf/schema.xml"
67+
DEFAULT_UPGRADE_SOURCE_PATH="${SOLR_TEMPLATE:-/opt/solr/template}/conf/schema.xml"
6868

6969
# Initialize from environment or defaults
7070
DATAVERSE_URL="${DATAVERSE_URL:-${DEFAULT_DATAVERSE_URL}}"
@@ -128,20 +128,6 @@ mark_not_ready() {
128128
fi
129129
}
130130

131-
# Cleanup function
132-
cleanup() {
133-
log_info "Shutting down..."
134-
mark_not_ready
135-
release_schema_lock
136-
if [[ "${HEALTH_CHECKS_ENABLED}" == "true" ]]; then
137-
rm -f "${LIVENESS_FILE}" 2>/dev/null || true
138-
fi
139-
exit 0
140-
}
141-
142-
# Set up signal handlers
143-
trap cleanup SIGTERM SIGINT SIGQUIT
144-
145131
# Usage information
146132
usage() {
147133
cat << EOF
@@ -401,10 +387,24 @@ release_schema_lock() {
401387
if [[ -n "${LOCK_FD}" ]]; then
402388
log_info "Releasing schema lock"
403389
exec {LOCK_FD}>&- 2>/dev/null || true
404-
unset LOCK_FD
390+
LOCK_FD=""
391+
fi
392+
}
393+
394+
# Cleanup function
395+
cleanup() {
396+
log_info "Shutting down..."
397+
mark_not_ready
398+
release_schema_lock
399+
if [[ "${HEALTH_CHECKS_ENABLED}" == "true" ]]; then
400+
rm -f "${LIVENESS_FILE}" 2>/dev/null || true
405401
fi
402+
exit 0
406403
}
407404

405+
# Set up signal handlers (all necessary functions have been set up beforehand)
406+
trap cleanup SIGTERM SIGINT SIGQUIT
407+
408408
# Initialize working directory
409409
init_work_dir() {
410410
if ! mkdir -p "${WORK_DIR}"; then
@@ -631,25 +631,30 @@ schema_has_changes() {
631631
return 0
632632
}
633633

634-
# Backup current schema
635-
backup_schema() {
634+
generate_backup_filename() {
636635
local schema_file="$1"
637636
# shellcheck disable=2155
638637
local timestamp="$(date +'%Y%m%d_%H%M%S')"
639638
local backup_file="${schema_file}.backup.${timestamp}"
639+
echo "$backup_file"
640+
}
641+
642+
# Backup current schema
643+
backup_schema() {
644+
local schema_file="$1"
645+
local backup_file="$2"
640646

641647
if [[ ! -f "${schema_file}" ]]; then
642648
log_warn "No existing schema to backup"
643649
return 0
644650
fi
645651

646-
log_info "Backing up schema to ${backup_file}"
652+
log_info "Backing up schema ${schema_file} to ${backup_file}"
647653
if ! cp "${schema_file}" "${backup_file}"; then
648654
log_error "Failed to backup schema"
649655
return 1
650656
fi
651657

652-
echo "${backup_file}"
653658
return 0
654659
}
655660

@@ -788,8 +793,9 @@ process_schema_update() {
788793

789794
# Critical section begins here
790795
{
796+
backup_file=$(generate_backup_filename "${SCHEMA_TARGET_PATH}")
791797
# Step 4: Backup current schema
792-
if ! backup_file=$(backup_schema "${SCHEMA_TARGET_PATH}"); then
798+
if ! backup_schema "${SCHEMA_TARGET_PATH}" "${backup_file}"; then
793799
release_schema_lock
794800
return 1
795801
fi
@@ -889,7 +895,7 @@ run_watch() {
889895
needs_update="true"
890896
pending_metadata_file="${metadata_file}"
891897
else
892-
log_debug "No metadata changes detected"
898+
log_info "No metadata changes detected"
893899
fi
894900
else
895901
log_error "Failed to fetch metadata fields, will retry"
@@ -1071,7 +1077,7 @@ main() {
10711077
fi
10721078

10731079
if [[ -n "${SOLR_USERNAME:-}" && -n "${SOLR_PASSWORD:-}" ]]; then
1074-
SOLR_AUTH_HEADER="Authorization: Basic $(echo -n "${SOLR_USERNAME}:${SOLR_PASSWORD}" | base64 -w 0)"
1080+
SOLR_AUTH_HEADER="Authorization: Basic $(echo -n "${SOLR_USERNAME}:${SOLR_PASSWORD}" | base64 | tr -d '\n')"
10751081
log_info "Solr authentication configured (HTTP Basic)"
10761082
fi
10771083

@@ -1157,9 +1163,11 @@ main() {
11571163
case "${MODE}" in
11581164
watch)
11591165
run_watch
1166+
exit $?
11601167
;;
11611168
oneshot)
11621169
run_oneshot
1170+
exit $?
11631171
;;
11641172
esac
11651173
}

0 commit comments

Comments
 (0)