Skip to content

Commit e85e628

Browse files
committed
chore(master): using black to reformat python files (reanahub#673)
1 parent 7664272 commit e85e628

7 files changed

Lines changed: 56 additions & 49 deletions

File tree

reana_workflow_controller/config.py

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -321,9 +321,7 @@ def _parse_interactive_sessions_environments(env_var):
321321
)
322322
"""Default image for REANA Job Controller sidecar."""
323323

324-
REANA_JOB_CONTROLLER_SECRET = os.getenv(
325-
"REANA_JOB_CONTROLLER_SECRET"
326-
)
324+
REANA_JOB_CONTROLLER_SECRET = os.getenv("REANA_JOB_CONTROLLER_SECRET")
327325
"""DOptional secret for REANA Job Controller sidecar."""
328326

329327
JOB_CONTROLLER_ENV_VARS = _env_vars_dict_to_k8s_list(
@@ -475,21 +473,15 @@ def _parse_interactive_sessions_environments(env_var):
475473
"REANA_RUNTIME_JOBS_KUBERNETES_TOLERATIONS"
476474
)
477475
"""Tolerations for jobs"""
478-
REANA_DATASTORE_ENABLED = os.getenv(
479-
"REANA_DATASTORE_ENABLED"
480-
) == "true"
476+
REANA_DATASTORE_ENABLED = os.getenv("REANA_DATASTORE_ENABLED") == "true"
481477
"""Set datastore (s3) sidecar for interactive sessions enabled or disabled"""
482478

483-
if(REANA_DATASTORE_ENABLED):
484-
REANA_DATASTORE_IMAGE = os.getenv(
485-
"REANA_DATASTORE_IMAGE"
486-
)
479+
if REANA_DATASTORE_ENABLED:
480+
REANA_DATASTORE_IMAGE = os.getenv("REANA_DATASTORE_IMAGE")
487481
"""Optional Image for datastore (s3) sidecar for interactive sessions"""
488482

489-
REANA_DATASTORE_SECRET = os.getenv(
490-
"REANA_DATASTORE_SECRET"
491-
)
483+
REANA_DATASTORE_SECRET = os.getenv("REANA_DATASTORE_SECRET")
492484
"""Optional secret for datastore (s3) sidecar for interactive sessions"""
493485
else:
494486
REANA_DATASTORE_IMAGE = ""
495-
REANA_DATASTORE_SECRET = ""
487+
REANA_DATASTORE_SECRET = ""

reana_workflow_controller/dask.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
# under the terms of the MIT License; see LICENSE file for more details.
66

77
"""Dask resource manager."""
8+
89
import logging
910
import os
1011
import yaml

reana_workflow_controller/k8s.py

Lines changed: 35 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -79,19 +79,29 @@ def __init__(
7979
self.image = image
8080
self.port = port
8181
self.path = path
82-
self.cvmfs_repos = cvmfs_repos or [],
82+
self.cvmfs_repos = (cvmfs_repos or [],)
8383
self.image_pull_secrets = []
8484
metadata = client.V1ObjectMeta(
8585
name=deployment_name,
8686
labels={"reana_workflow_mode": "session"},
8787
)
8888
self._session_container = client.V1Container(
89-
name=self.deployment_name, image=self.image, env=[], volume_mounts=[], ports=[client.V1ContainerPort(container_port=self.port)]
89+
name=self.deployment_name,
90+
image=self.image,
91+
env=[],
92+
volume_mounts=[],
93+
ports=[client.V1ContainerPort(container_port=self.port)],
9094
)
9195
containers = [self._session_container]
92-
if(REANA_DATASTORE_ENABLED):
96+
if REANA_DATASTORE_ENABLED:
9397
self._s3_container = client.V1Container(
94-
name="datastore", image=REANA_DATASTORE_IMAGE, env=[], volume_mounts=[], ports=[], image_pull_policy="Always", lifecycle=[]
98+
name="datastore",
99+
image=REANA_DATASTORE_IMAGE,
100+
env=[],
101+
volume_mounts=[],
102+
ports=[],
103+
image_pull_policy="Always",
104+
lifecycle=[],
95105
)
96106
containers.append(self._s3_container)
97107

@@ -229,55 +239,58 @@ def setup_s3_storage(self):
229239
volume_name = "s3-mounts"
230240

231241
volume = client.V1Volume(
232-
name=volume_name,
233-
empty_dir=client.V1EmptyDirVolumeSource()
242+
name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()
234243
)
235244

236245
volume_mount = client.V1VolumeMount(
237246
name=volume_name,
238247
mount_path="/data/s3/",
239-
mount_propagation="HostToContainer"
248+
mount_propagation="HostToContainer",
240249
)
241250

242251
self._session_container.volume_mounts.append(volume_mount)
243252

244253
volume_mount = client.V1VolumeMount(
245-
name=volume_name,
246-
mount_path="/s3-data",
247-
mount_propagation="Bidirectional"
254+
name=volume_name, mount_path="/s3-data", mount_propagation="Bidirectional"
248255
)
249256

250257
self._s3_container.volume_mounts.append(volume_mount)
251258

252259
self._pod_spec.volumes.append(volume)
253260

254261
def setup_s3_sidecar(self):
255-
"""Add the sidecar for s3 mounts"""
262+
"""Add the sidecar for s3 mounts."""
256263
# Define the volume mount for /dev/fuse
257264
fuse_volume_mount = client.V1VolumeMount(
258-
name="fuse-device",
259-
mount_path="/dev/fuse"
265+
name="fuse-device", mount_path="/dev/fuse"
260266
)
261267

262268
# Define the volume for /dev/fuse
263-
fuse_volume = client.V1HostPathVolumeSource(
264-
path="/dev/fuse"
265-
)
269+
fuse_volume = client.V1HostPathVolumeSource(path="/dev/fuse")
266270

267271
# Append the volume mount and volume to the session container and pod spec
268272
self._s3_container.volume_mounts.append(fuse_volume_mount)
269-
self._pod_spec.volumes.append(client.V1Volume(name="fuse-device", host_path=fuse_volume))
273+
self._pod_spec.volumes.append(
274+
client.V1Volume(name="fuse-device", host_path=fuse_volume)
275+
)
270276

271277
security_context = client.V1SecurityContext(
272-
run_as_user=0, allow_privilege_escalation=True, capabilities=client.V1Capabilities(add=["SYS_ADMIN"]), privileged=True
278+
run_as_user=0,
279+
allow_privilege_escalation=True,
280+
capabilities=client.V1Capabilities(add=["SYS_ADMIN"]),
281+
privileged=True,
273282
)
274283
self._s3_container.security_context = security_context
275284

276285
# adding umount for correct termination
277286
lifecycle_dict = {
278287
"preStop": {
279288
"exec": {
280-
"command": ["/bin/sh", "-c", "xargs umount -l < /etc/active_mounts.txt || true"]
289+
"command": [
290+
"/bin/sh",
291+
"-c",
292+
"xargs umount -l < /etc/active_mounts.txt || true",
293+
]
281294
}
282295
}
283296
}
@@ -304,9 +317,7 @@ def add_environment_variable(self, name, value):
304317

305318
def add_run_with_root_permissions(self):
306319
"""Run interactive session with root."""
307-
security_context = client.V1SecurityContext(
308-
run_as_user=0, privileged=True
309-
)
320+
security_context = client.V1SecurityContext(run_as_user=0, privileged=True)
310321
self._session_container.security_context = security_context
311322

312323
def add_user_secrets(self):
@@ -337,7 +348,7 @@ def add_user_secrets(self):
337348
self._session_container.env = session_env
338349

339350
# mount s3 secrets
340-
if(REANA_DATASTORE_ENABLED):
351+
if REANA_DATASTORE_ENABLED:
341352
self._s3_container.env = s3_env
342353

343354
def get_deployment_objects(self):
@@ -397,7 +408,7 @@ def build_interactive_jupyter_deployment_k8s_objects(
397408
deployment_builder.add_command_arguments(command_args)
398409
deployment_builder.add_reana_shared_storage()
399410
deployment_builder.add_image_pull_secrets()
400-
if(REANA_DATASTORE_ENABLED):
411+
if REANA_DATASTORE_ENABLED:
401412
deployment_builder.setup_s3_sidecar()
402413
deployment_builder.setup_s3_storage()
403414
if cvmfs_repos:

reana_workflow_controller/rest/workflows_session.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88

99
"""REANA Workflow Controller interactive sessions REST API."""
1010

11-
1211
from flask import Blueprint, jsonify, request
1312
from webargs import fields
1413
from webargs.flaskparser import use_kwargs
@@ -18,7 +17,6 @@
1817

1918
from reana_workflow_controller.workflow_run_manager import KubernetesWorkflowRunManager
2019

21-
2220
blueprint = Blueprint("workflows_session", __name__)
2321

2422

reana_workflow_controller/workflow_run_manager.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
# under the terms of the MIT License; see LICENSE file for more details.
66

77
"""Workflow run manager interface."""
8+
89
import base64
910
import copy
1011
import json
@@ -876,7 +877,7 @@ def _create_job_spec(
876877

877878
if REANA_JOB_CONTROLLER_SECRET:
878879
spec.template.spec.image_pull_secrets = [
879-
client.V1LocalObjectReference(name= REANA_JOB_CONTROLLER_SECRET)
880+
client.V1LocalObjectReference(name=REANA_JOB_CONTROLLER_SECRET)
880881
]
881882

882883
# filter out volumes with the same name

sidecars/datastore/app.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,12 @@ def createFolders(aliases, base_dir):
1818
print(f"A error accrued during the creation of the s3-buckets: {e}")
1919
return False
2020

21+
2122
def getCredentials(aliases):
2223
aliases_credentials = []
2324
try:
2425
for alias in aliases:
25-
temp_list =[alias]
26+
temp_list = [alias]
2627
temp_list.append(os.getenv(f"S3_TO_LOCAL_{alias}_BUCKET"))
2728
temp_list.append(os.getenv(f"S3_TO_LOCAL_{alias}_HOST"))
2829
temp_list.append(os.getenv(f"S3_TO_LOCAL_{alias}_REGION"))
@@ -33,16 +34,17 @@ def getCredentials(aliases):
3334
except Exception as e:
3435
print(f"A error accrued during load of S3 credentials: {e}")
3536
return False
36-
37+
38+
3739
def createS3Mounts(aliases, base_dir):
38-
for i in (0, len(aliases)-2):
40+
for i in (0, len(aliases) - 2):
3941
try:
4042
with open(".passwd-s3fs", mode="w", encoding="utf-8") as f:
4143
f.write(f"{aliases[i][4]}:{aliases[i][5]}")
4244
os.system("chmod 600 .passwd-s3fs")
4345
target_path = os.path.join(base_dir, aliases[i][0])
4446
target_path = os.path.join(target_path, aliases[i][1])
45-
cmd=f"s3fs {aliases[i][1]} {target_path} -o passwd_file=.passwd-s3fs -o url={aliases[i][2]} -o endpoint={aliases[i][3]} -o use_path_request_style -o allow_other"
47+
cmd = f"s3fs {aliases[i][1]} {target_path} -o passwd_file=.passwd-s3fs -o url={aliases[i][2]} -o endpoint={aliases[i][3]} -o use_path_request_style -o allow_other"
4648
rc = os.system(cmd)
4749
if rc != 0:
4850
print(
@@ -58,23 +60,24 @@ def createS3Mounts(aliases, base_dir):
5860
print(f"A error accrued during the mounting of alias {aliases[i][0]}: {e}")
5961
return False
6062

63+
6164
def main():
6265
base_dir = "/s3-data"
63-
66+
6467
# Ensure the base directory exists
6568
if not os.path.exists(base_dir):
6669
os.makedirs(base_dir)
6770
print(f"Base directory created: {base_dir}")
6871

6972
# Regex pattern: S3_TO_LOCAL_<ALIAS>_ALIAS
7073
# The (.*) captures the <ALIAS> part
71-
pattern = re.compile(r'^S3_TO_LOCAL_(.*)_ALIAS$')
74+
pattern = re.compile(r"^S3_TO_LOCAL_(.*)_ALIAS$")
7275

7376
print("Scanning environment variables for S3 aliases...")
7477

7578
aliases = []
7679
for key, value in os.environ.items():
77-
match = pattern.match(key)
80+
match = pattern.match(key)
7881
if match:
7982
aliases.append(value)
8083

@@ -87,5 +90,6 @@ def main():
8790
else:
8891
print(f"Processed {len(aliases)} S3 alias(es).")
8992

93+
9094
if __name__ == "__main__":
9195
main()

tests/test_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def test_delete_workflow_with_interactive_session(
119119
"reana_workflow_controller.k8s",
120120
current_k8s_networking_api_client=mock.DEFAULT,
121121
):
122-
(response, http_response) = delete_workflow(workflow)
122+
response, http_response = delete_workflow(workflow)
123123
data = json.loads(response.get_data())
124124
assert "Workflow successfully deleted" in data["message"]
125125
assert http_response == 200

0 commit comments

Comments
 (0)