Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
ce3085a
feat(k8s): create shared directory sidecar - main container (#673)
etlstrauss Mar 3, 2026
ba212da
feat(k8s): prepare variables for coustmization of job-controller (#673)
etlstrauss Mar 18, 2026
095eb05
feat(k8s): configure datastore sidecar for interactive session (#673)
etlstrauss Mar 18, 2026
6eefab5
feat(k8s): adding parsing of tolerations for taints (#673)
etlstrauss Mar 18, 2026
6c9784f
feat(k8s): adding datastore sidecar for interactive session (#673)
etlstrauss Mar 18, 2026
c1d652f
feat(k8s): fix imagePullSecret definition for datastore container (#673)
etlstrauss Mar 23, 2026
9617a02
feat(k8s): adding lifecycle for datastore sidecar (#673)
etlstrauss Mar 23, 2026
805b3da
feat(k8s): enabling of datastore interactive session side car (#673)
etlstrauss Mar 23, 2026
7664272
feat(k8s): change image pull secrets for jobcontroller (#673)
etlstrauss Mar 23, 2026
e85e628
chore(master): using black to reformat python files (#673)
etlstrauss Mar 24, 2026
18f6707
chore(master): merge from master (#673)
etlstrauss Mar 24, 2026
e49f013
chore(k8s): adding configurations based on run-tests.sh (#673)
etlstrauss Mar 24, 2026
5da53ad
feat(k8s): create test for datastore sidecar (#673)
etlstrauss Apr 1, 2026
6ab8e7e
chore(master): merge from upstream master (#673)
etlstrauss Apr 1, 2026
69901d5
feat(k8s): only start datastore when needed (#673)
etlstrauss Apr 13, 2026
9ac953e
feat(k8s): small fixes + reformatting (#673)
etlstrauss Apr 13, 2026
e1a46b3
feat(controller): adding s3 config (#673)
etlstrauss Apr 13, 2026
c8b28c0
feat(controller): reconfigure s3 configuration (#673)
etlstrauss Apr 20, 2026
a7f6a66
feat(k8s): configure k8s s3 (#673)
etlstrauss Apr 20, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
106 changes: 53 additions & 53 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -22,24 +22,24 @@ COPY requirements.txt /code/
# Install all system and Python dependencies in one go
# hadolint ignore=DL3008,DL3013
RUN apt-get update -y && \
apt-get install --no-install-recommends -y \
gcc \
git \
libpcre3 \
libpcre3-dev \
libpython3.12 \
python3-pip \
python3.12 \
python3.12-dev \
vim-tiny && \
pip install --no-cache-dir --upgrade 'setuptools<81' && \
pip install --no-cache-dir -r /code/requirements.txt && \
apt-get remove -y \
gcc \
python3.12-dev && \
apt-get autoremove -y && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
apt-get install --no-install-recommends -y \
gcc \
git \
libpcre3 \
libpcre3-dev \
libpython3.12 \
python3-pip \
python3.12 \
python3.12-dev \
vim-tiny && \
pip install --no-cache-dir --upgrade 'setuptools<81' && \
pip install --no-cache-dir -r /code/requirements.txt && \
apt-get remove -y \
gcc \
python3.12-dev && \
apt-get autoremove -y && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

# Copy cluster component source code
WORKDIR /code
Expand All @@ -52,19 +52,19 @@ RUN if [ "${DEBUG}" -gt 0 ]; then pip install --no-cache-dir -e ".[debug]"; else
# Are we building with locally-checked-out shared modules?
# hadolint ignore=DL3013
RUN if test -e modules/reana-commons; then \
if [ "${DEBUG}" -gt 0 ]; then \
pip install --no-cache-dir -e "modules/reana-commons[kubernetes]" --upgrade; \
else \
pip install --no-cache-dir "modules/reana-commons[kubernetes]" --upgrade; \
fi \
fi; \
if test -e modules/reana-db; then \
if [ "${DEBUG}" -gt 0 ]; then \
pip install --no-cache-dir -e "modules/reana-db" --upgrade; \
else \
pip install --no-cache-dir "modules/reana-db" --upgrade; \
fi \
fi
if [ "${DEBUG}" -gt 0 ]; then \
pip install --no-cache-dir -e "modules/reana-commons[kubernetes]" --upgrade; \
else \
pip install --no-cache-dir "modules/reana-commons[kubernetes]" --upgrade; \
fi \
fi; \
if test -e modules/reana-db; then \
if [ "${DEBUG}" -gt 0 ]; then \
pip install --no-cache-dir -e "modules/reana-db" --upgrade; \
else \
pip install --no-cache-dir "modules/reana-db" --upgrade; \
fi \
fi

# Check for any broken Python dependencies
RUN pip check
Expand All @@ -75,12 +75,12 @@ ARG UWSGI_MAX_FD=1048576
ARG UWSGI_PROCESSES=2
ARG UWSGI_THREADS=2
ENV FLASK_APP=reana_workflow_controller/app.py \
PYTHONPATH=/workdir \
TERM=xterm \
UWSGI_BUFFER_SIZE=${UWSGI_BUFFER_SIZE:-8192} \
UWSGI_MAX_FD=${UWSGI_MAX_FD:-1048576} \
UWSGI_PROCESSES=${UWSGI_PROCESSES:-2} \
UWSGI_THREADS=${UWSGI_THREADS:-2}
PYTHONPATH=/workdir \
TERM=xterm \
UWSGI_BUFFER_SIZE=${UWSGI_BUFFER_SIZE:-8192} \
UWSGI_MAX_FD=${UWSGI_MAX_FD:-1048576} \
UWSGI_PROCESSES=${UWSGI_PROCESSES:-2} \
UWSGI_THREADS=${UWSGI_THREADS:-2}

# Expose ports to clients
EXPOSE 5000
Expand All @@ -90,22 +90,22 @@ EXPOSE 5000
# while also allowing shell expansion
# hadolint ignore=DL3025
CMD exec uwsgi \
--buffer-size ${UWSGI_BUFFER_SIZE} \
--die-on-term \
--hook-master-start "unix_signal:2 gracefully_kill_them_all" \
--hook-master-start "unix_signal:15 gracefully_kill_them_all" \
--enable-threads \
--http-socket 0.0.0.0:5000 \
--master \
--max-fd ${UWSGI_MAX_FD} \
--module reana_workflow_controller.app:app \
--need-app \
--processes ${UWSGI_PROCESSES} \
--single-interpreter \
--stats /tmp/stats.socket \
--threads ${UWSGI_THREADS} \
--vacuum \
--wsgi-disable-file-wrapper
--buffer-size ${UWSGI_BUFFER_SIZE} \
--die-on-term \
--hook-master-start "unix_signal:2 gracefully_kill_them_all" \
--hook-master-start "unix_signal:15 gracefully_kill_them_all" \
--enable-threads \
--http-socket 0.0.0.0:5000 \
--master \
--max-fd ${UWSGI_MAX_FD} \
--module reana_workflow_controller.app:app \
--need-app \
--processes ${UWSGI_PROCESSES} \
--single-interpreter \
--stats /tmp/stats.socket \
--threads ${UWSGI_THREADS} \
--vacuum \
--wsgi-disable-file-wrapper

# Set image labels
LABEL org.opencontainers.image.authors="team@reanahub.io"
Expand Down
5 changes: 5 additions & 0 deletions MANIFEST.in
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ include Dockerfile
include LICENSE
include pytest.ini
include docs/openapi.json
include sidecars/datastore/Dockerfile
exclude .editorconfig
exclude .prettierrc
exclude .prettierignore
Expand All @@ -33,3 +34,7 @@ recursive-include tests *.py
recursive-include tests *.finished
recursive-include tests *.running
recursive-include tests *.waiting
recursive-include sidecars *.py
recursive-include sidecars *.sh
recursive-include sidecars *.txt

20 changes: 20 additions & 0 deletions reana_workflow_controller/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,8 @@ def _parse_interactive_sessions_environments(env_var):
)
"""Default image for REANA Job Controller sidecar."""

REANA_JOB_CONTROLLER_SECRET = os.getenv("REANA_JOB_CONTROLLER_SECRET")
"""Optional secret for REANA Job Controller sidecar."""

JOB_CONTROLLER_ENV_VARS = _env_vars_dict_to_k8s_list(
json.loads(os.getenv("REANA_JOB_CONTROLLER_ENV_VARS", "{}"))
Expand Down Expand Up @@ -465,3 +467,21 @@ def _parse_interactive_sessions_environments(env_var):

MAX_WORKFLOW_SHARING_MESSAGE_LENGTH = 5000
"""Maximum length of the user-provided message when sharing a workflow."""


# Raw tolerations for runtime jobs, taken verbatim from the environment.
# NOTE(review): value is the unparsed string — confirm which consumer parses it.
REANA_RUNTIME_JOBS_KUBERNETES_TOLERATIONS = os.getenv(
    "REANA_RUNTIME_JOBS_KUBERNETES_TOLERATIONS"
)
"""Tolerations for jobs"""

# Feature flag for the datastore (S3) sidecar: enabled only when the env var
# equals the exact lowercase string "true"; unset or any other value disables it.
REANA_DATASTORE_ENABLED = os.getenv("REANA_DATASTORE_ENABLED") == "true"
"""Set datastore (s3) sidecar for interactive sessions enabled or disabled"""

if REANA_DATASTORE_ENABLED:
    # Image/secret are read from the environment only when the sidecar is
    # enabled; either may still be None if the corresponding env var is unset.
    REANA_DATASTORE_IMAGE = os.getenv("REANA_DATASTORE_IMAGE")
    """Optional Image for datastore (s3) sidecar for interactive sessions"""

    REANA_DATASTORE_SECRET = os.getenv("REANA_DATASTORE_SECRET")
    """Optional secret for datastore (s3) sidecar for interactive sessions"""
else:
    # When disabled, fall back to empty strings (falsy) so the names always
    # exist and consumers need no hasattr/None checks.
    REANA_DATASTORE_IMAGE = ""
    REANA_DATASTORE_SECRET = ""
128 changes: 119 additions & 9 deletions reana_workflow_controller/k8s.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@
REANA_INGRESS_ANNOTATIONS,
REANA_INGRESS_CLASS_NAME,
REANA_INGRESS_HOST,
REANA_DATASTORE_SECRET,
REANA_DATASTORE_IMAGE,
REANA_DATASTORE_ENABLED,
)


Expand Down Expand Up @@ -74,16 +77,42 @@ def __init__(
self.image = image
self.port = port
self.path = path
self.cvmfs_repos = cvmfs_repos or []
self.cvmfs_repos = (cvmfs_repos or [],)
self.image_pull_secrets = []
self.datastore_enabled = False
metadata = client.V1ObjectMeta(
name=deployment_name,
labels={"reana_workflow_mode": "session"},
)
self._session_container = client.V1Container(
name=self.deployment_name, image=self.image, env=[], volume_mounts=[]
name=self.deployment_name,
image=self.image,
env=[],
volume_mounts=[],
ports=[client.V1ContainerPort(container_port=self.port)],
)
containers = [self._session_container]
if REANA_DATASTORE_ENABLED:
user_secrets = UserSecretsStore.fetch(self.owner_id)
all_env = user_secrets.get_env_secrets_as_k8s_spec()
s3_env = [
s for s in all_env if s.get("name", "").startswith("S3_TO_LOCAL_")
]
if s3_env:
self.datastore_enabled = True
if self.datastore_enabled:
self._s3_container = client.V1Container(
name="datastore",
image=REANA_DATASTORE_IMAGE,
env=[],
volume_mounts=[],
ports=[],
image_pull_policy="Always",
)
containers.append(self._s3_container)

self._pod_spec = client.V1PodSpec(
containers=[self._session_container],
containers=containers,
volumes=[],
node_selector=REANA_RUNTIME_SESSIONS_KUBERNETES_NODE_LABEL,
# Disable service discovery with env variables, so that the environment is
Expand Down Expand Up @@ -146,9 +175,15 @@ def _build_service(self, metadata):
type="ClusterIP",
ports=[
client.V1ServicePort(
name="interactive-session",
port=InteractiveDeploymentK8sBuilder.internal_service_port,
target_port=self.port,
)
),
client.V1ServicePort(
name="datastore",
port=5000,
target_port=5000,
),
],
selector={"app": self.deployment_name},
)
Expand Down Expand Up @@ -204,6 +239,61 @@ def add_reana_shared_storage(self):
self._session_container.volume_mounts.append(volume_mount)
self._pod_spec.volumes.append(volume)

def add_image_pull_secrets(self):
    """Attach the configured datastore image pull secret to the pod spec.

    No-op when ``REANA_DATASTORE_SECRET`` is empty or unset.
    """
    # NOTE(review): this REPLACES any pre-existing pod-level
    # image_pull_secrets instead of appending — confirm that is intended.
    if REANA_DATASTORE_SECRET:
        self._pod_spec.image_pull_secrets = [
            client.V1LocalObjectReference(name=REANA_DATASTORE_SECRET)
        ]

def setup_s3_storage(self):
    """Configure shared empty_dir volume for S3 sidecar and session container.

    Creates an ephemeral ``emptyDir`` volume and mounts it in both the
    interactive session container (read side) and the datastore sidecar
    (write side). Must only be called when the sidecar container exists,
    since it touches ``self._s3_container``.
    """
    volume_name = "s3-mounts"

    # Ephemeral scratch volume shared between the two containers; its
    # lifetime is tied to the pod.
    volume = client.V1Volume(
        name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()
    )

    # Session container: HostToContainer propagation so (FUSE) mounts
    # created by the sidecar become visible under /data/s3/.
    volume_mount = client.V1VolumeMount(
        name=volume_name,
        mount_path="/data/s3/",
        mount_propagation="HostToContainer",
    )

    self._session_container.volume_mounts.append(volume_mount)

    # Sidecar: Bidirectional propagation so mounts it creates under
    # /s3-data propagate back out of its mount namespace.
    volume_mount = client.V1VolumeMount(
        name=volume_name, mount_path="/s3-data", mount_propagation="Bidirectional"
    )

    self._s3_container.volume_mounts.append(volume_mount)

    self._pod_spec.volumes.append(volume)

def setup_s3_sidecar(self):
    """Add the sidecar for s3 mounts.

    Exposes the host's ``/dev/fuse`` device inside the datastore sidecar
    and grants it the privileges FUSE mounting needs. Must only be called
    when the sidecar container exists (``self._s3_container``).
    """
    # Define the volume mount for /dev/fuse
    fuse_volume_mount = client.V1VolumeMount(
        name="fuse-device", mount_path="/dev/fuse"
    )

    # hostPath volume for the host's /dev/fuse character device
    fuse_volume = client.V1HostPathVolumeSource(path="/dev/fuse")

    # Append the volume mount to the SIDECAR container and the volume to the
    # pod spec (the session container never sees /dev/fuse directly).
    self._s3_container.volume_mounts.append(fuse_volume_mount)
    self._pod_spec.volumes.append(
        client.V1Volume(name="fuse-device", host_path=fuse_volume)
    )

    # FUSE mounting requires elevated privileges: root, SYS_ADMIN, privileged.
    # NOTE(review): privileged=True already implies all capabilities and
    # privilege escalation — confirm this broad grant is acceptable, and
    # whether SYS_ADMIN alone (without privileged) would suffice.
    security_context = client.V1SecurityContext(
        run_as_user=0,
        allow_privilege_escalation=True,
        capabilities=client.V1Capabilities(add=["SYS_ADMIN"]),
        privileged=True,
    )
    self._s3_container.security_context = security_context

def add_cvmfs_repo_mounts(self, cvmfs_repos):
"""Add mounts for the provided CVMFS repositories to the deployment.

Expand All @@ -224,9 +314,7 @@ def add_environment_variable(self, name, value):

def add_run_with_root_permissions(self):
"""Run interactive session with root."""
security_context = client.V1SecurityContext(
run_as_user=0, allow_privilege_escalation=False
)
security_context = client.V1SecurityContext(run_as_user=0, privileged=True)
self._session_container.security_context = security_context

def add_user_secrets(self):
Expand All @@ -239,8 +327,26 @@ def add_user_secrets(self):
self._pod_spec.volumes.append(secrets_volume)
self._session_container.volume_mounts.append(secrets_volume_mount)

# set environment secrets
self._session_container.env += user_secrets.get_env_secrets_as_k8s_spec()
# build env arrays for different containers
# sorting s3 variables to only be mounted in sidecar
all_env = user_secrets.get_env_secrets_as_k8s_spec()
s3_env = []
session_env = []

if self.datastore_enabled:
for secret in all_env:
secret_name = secret.get("name", "")
if secret_name.startswith("S3_TO_LOCAL_"):
s3_env.append(secret)
else:
session_env.append(secret)
# set environment secrets without s3 secrets
self._session_container.env = session_env
# mount S3 secrets only in the datastore sidecar
if REANA_DATASTORE_ENABLED:
self._s3_container.env = s3_env
else:
self._session_container.env = all_env

def get_deployment_objects(self):
"""Return the already built Kubernetes objects."""
Expand Down Expand Up @@ -298,6 +404,10 @@ def build_interactive_jupyter_deployment_k8s_objects(
)
deployment_builder.add_command_arguments(command_args)
deployment_builder.add_reana_shared_storage()
deployment_builder.add_image_pull_secrets()
if REANA_DATASTORE_ENABLED and deployment_builder.datastore_enabled:
deployment_builder.setup_s3_sidecar()
deployment_builder.setup_s3_storage()
if cvmfs_repos:
deployment_builder.add_cvmfs_repo_mounts(cvmfs_repos)
if expose_secrets:
Expand Down
Loading