diff --git a/scripts/init.sh b/scripts/init.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3dd2b083157036d8bc4e8d56857f6219bb73c724
--- /dev/null
+++ b/scripts/init.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Simple init script for Python on DTU HPC
+# Patrick M. Jensen, patmjen@dtu.dk, 2022
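+# Note: source this script (source scripts/init.sh) so the loaded modules and
+# the activated virtualenv persist in the calling shell.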
+
+# Configuration
+# This is what you should change for your setup
+PYVER=3.9.14  # Python version
+CUVER=11.6  # CUDA version
+VENVDIR=.  # Where to store your virtualenv
+VENVNAME=venv  # Name of your virtualenv
+
+# Load modules
+module load python3/$PYVER
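+# Load the numpy, scipy, matplotlib and pandas modules built for this Python version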
+module load $(module avail -o modulepath -t -C "python-${PYVER}" | grep "numpy/")
+module load $(module avail -o modulepath -t -C "python-${PYVER}" | grep "scipy/")
+module load $(module avail -o modulepath -t -C "python-${PYVER}" | grep "matplotlib/")
+module load $(module avail -o modulepath -t -C "python-${PYVER}" | grep "pandas/")
+module load cuda/$CUVER
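+# Pick the newest cuDNN module that matches the chosen CUDA version, if any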
+CUDNN_MOD=$(module avail -o modulepath -t cudnn | grep "cuda-${CUVER}" | sort | tail -n1)
+if [[ -n "${CUDNN_MOD}" ]]
+then
+    module load "${CUDNN_MOD}"
+fi
+
+# Create virtualenv if needed and activate it
+if [ ! -d "${VENVDIR}/${VENVNAME}" ]
+then
+    echo "INFO: Did not find virtualenv. Creating..."
+    virtualenv "${VENVDIR}/${VENVNAME}"
+fi
+source "${VENVDIR}/${VENVNAME}/bin/activate"
+
+# Make all GPUs visible
+if command -v nvidia-smi &> /dev/null
+then
+    # Join the GPU indices into a comma-separated list (no trailing comma)
+    export CUDA_VISIBLE_DEVICES=$(nvidia-smi --query-gpu=index --format=csv,noheader | paste -sd, -)
+    echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}"
+fi
+
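+# Example usage (sketch): the package name and script name below are placeholders.
+#   source scripts/init.sh
+#   pip install torch                # installs into the activated virtualenv
+#   python my_training_script.py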