From 9ca6186c5b637500213ee86bb46c554d46eefc05 Mon Sep 17 00:00:00 2001
From: s214735 <s214735@dtu.dk>
Date: Mon, 20 Jan 2025 15:01:55 +0100
Subject: [PATCH] added and changed more notebooks

---
 ...ipynb => deep_learning_segmentation.ipynb} |   4 +-
 docs/notebooks/features.ipynb                 | 220 ++++++++++
 docs/notebooks/local_thickness.ipynb          |   2 +-
 docs/notebooks/ome_zarr.ipynb                 | 377 ++++++++++++++++++
 docs/notebooks/segmentation.ipynb             | 233 +++++++++++
 qim3d/features/_common_features_methods.py    |  30 +-
 qim3d/mesh/_common_mesh_methods.py            |   7 +-
 7 files changed, 852 insertions(+), 21 deletions(-)
 rename docs/notebooks/{segmentation_pipeline.ipynb => deep_learning_segmentation.ipynb} (99%)
 create mode 100644 docs/notebooks/features.ipynb
 create mode 100644 docs/notebooks/ome_zarr.ipynb
 create mode 100644 docs/notebooks/segmentation.ipynb

diff --git a/docs/notebooks/segmentation_pipeline.ipynb b/docs/notebooks/deep_learning_segmentation.ipynb
similarity index 99%
rename from docs/notebooks/segmentation_pipeline.ipynb
rename to docs/notebooks/deep_learning_segmentation.ipynb
index 4983a7fd..78466f19 100644
--- a/docs/notebooks/segmentation_pipeline.ipynb
+++ b/docs/notebooks/deep_learning_segmentation.ipynb
@@ -396,7 +396,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "qim3d-env",
    "language": "python",
    "name": "python3"
   },
@@ -410,7 +410,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.11.10"
   }
  },
  "nbformat": 4,
diff --git a/docs/notebooks/features.ipynb b/docs/notebooks/features.ipynb
new file mode 100644
index 00000000..1edf03a5
--- /dev/null
+++ b/docs/notebooks/features.ipynb
@@ -0,0 +1,220 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Features and mesh notebook\n",
+    "This notebook will demonstrate how to extract features from a volumetric object. Since these operations treat the volume as a mesh, we will also explore the use of the volume as a mesh, both visualizing and saving the object.\n",
+    "\n",
+    "First, we can generate an object using the `generate` module, and explore the shape using the `volumetric` method from the `viz` module:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "a16d769ea0474025b61581a195850836",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Output()"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "import qim3d\n",
+    "\n",
+    "vol = qim3d.generate.noise_object(\n",
+    "    base_shape = (128, 128, 128),\n",
+    "    final_shape= (128, 128, 128),\n",
+    "    noise_scale= 0.00,\n",
+    "    gamma= 1.0,\n",
+    "    max_value= 255,\n",
+    "    threshold= 0.5,\n",
+    "    object_shape = 'cylinder'\n",
+    ")\n",
+    "\n",
+    "qim3d.viz.volumetric(vol)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, that we have generate a massive volume of a ball, we can extract some features from it using the `features` module:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Converting volume to mesh.\n",
+      "Computed level using Otsu's method: 0\n",
+      "Padded volume with (2, 2, 2) to shape: (132, 132, 132)\n",
+      "Converting volume to mesh.\n",
+      "Computed level using Otsu's method: 0\n",
+      "Padded volume with (2, 2, 2) to shape: (132, 132, 132)\n",
+      "Converting volume to mesh.\n",
+      "Computed level using Otsu's method: 0\n",
+      "Padded volume with (2, 2, 2) to shape: (132, 132, 132)\n"
+     ]
+    }
+   ],
+   "source": [
+    "area = qim3d.features.area(vol)\n",
+    "volume = qim3d.features.volume(vol)\n",
+    "sphericity = qim3d.features.sphericity(vol)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Area: 42597.44460846406. \n",
+      "Volume: 729161.3333333334. \n",
+      "Sphericity: 0.919707365529198.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(f\"Area: {area}. \\nVolume: {volume}. \\nSphericity: {sphericity}.\")"
+   ]
+  },
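+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sphericity relates the two other features: it is the surface area of a sphere with the same volume as the object, divided by the object's actual surface area. As a quick sanity check (plain NumPy only, reusing the `area` and `volume` values from above), we can recompute it directly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "# Recompute sphericity from the reported area and volume as a sanity check\n",
+    "sphericity_check = (np.pi ** (1 / 3) * (6 * volume) ** (2 / 3)) / area\n",
+    "print(f\"Sphericity recomputed from area and volume: {sphericity_check}\")"
+   ]
+  },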
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As we can see in the output logs, the volume is converted to a mesh, after which it's features are calculated. We can also transform the volume to a mesh first, followed by visualization and feature extraction: "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Computed level using Otsu's method: 0\n",
+      "Padded volume with (2, 2, 2) to shape: (132, 132, 132)\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "07c7c36f9ffb4d80baa500fbbb980516",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Output()"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "mesh = qim3d.mesh.from_volume(vol)\n",
+    "\n",
+    "qim3d.viz.mesh(mesh.vertices, mesh.faces, wireframe=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Mesh area: 42597.44460846406. \n",
+      "Mesh volume: 729161.3333333334. \n",
+      "Mesh sphericity: 0.919707365529198.\n"
+     ]
+    }
+   ],
+   "source": [
+    "area_mesh = qim3d.features.area(mesh)\n",
+    "volume_mesh = qim3d.features.volume(mesh)\n",
+    "sphericity_mesh = qim3d.features.sphericity(mesh)\n",
+    "\n",
+    "print(f\"Mesh area: {area_mesh}. \\nMesh volume: {volume_mesh}. \\nMesh sphericity: {sphericity_mesh}.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "After having created the mesh and found features of the mesh, we can save it, such that it can be visualized in other programs or reused at a later point:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "qim3d.io.save_mesh('circular_mesh.obj', mesh)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Should we wish to use it, it can be imported using:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mesh = qim3d.io.load_mesh('circular_mesh.obj')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "qim3d-env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/notebooks/local_thickness.ipynb b/docs/notebooks/local_thickness.ipynb
index a3894702..e358c9ee 100644
--- a/docs/notebooks/local_thickness.ipynb
+++ b/docs/notebooks/local_thickness.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Local thickness notebook"
+    "## Local thickness notebook"
    ]
   },
   {
diff --git a/docs/notebooks/ome_zarr.ipynb b/docs/notebooks/ome_zarr.ipynb
new file mode 100644
index 00000000..12aecab3
--- /dev/null
+++ b/docs/notebooks/ome_zarr.ipynb
@@ -0,0 +1,377 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## OME-Zarr notebook\n",
+    "\n",
+    "This notebook will demonstrate the `qim3d` functionalities that specifically target the OME-Zarr file format.\n",
+    "\n",
+    "OME-Zarr is a cloud-native, multi-dimensional file format optimized for storing and analyzing large-scale volumetric imaging data. It leverages the Zarr storage framework, which organizes data in a chunked, hierarchical structure, enabling efficient random access to specific regions without loading entire datasets into memory. The format supports multi-resolution storage, allowing visualization and analysis at different levels of detail, improving performance for large datasets. OME-Zarr stores metadata in JSON format following the Open Microscopy Environment (OME) model, ensuring interoperability across a wide range of bioimaging tools. Additionally, its compatibility with modern data science libraries like Dask and Xarray allows for scalable parallel processing, making it an ideal choice for applications in microscopy, medical imaging, and machine learning.\n",
+    "\n",
+    "First we will fetch a large data file, which we can save to the OME-Zarr file type. Here we will use the `io.Downloader` class. First we define the variable, and then we evaluate what data we would like to download, by visualizing the options with the 'help' command."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 49,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on Downloader in module qim3d.io._downloader object:\n",
+      "\n",
+      "class Downloader(builtins.object)\n",
+      " |  Class for downloading large data files available on the [QIM data repository](https://data.qim.dk/).\n",
+      " |  \n",
+      " |  Attributes:\n",
+      " |      folder_name (str or os.PathLike): Folder class with the name of the folder in <https://data.qim.dk/>\n",
+      " |  \n",
+      " |  Syntax for downloading and loading a file is `qim3d.io.Downloader().{folder_name}.{file_name}(load_file=True)`\n",
+      " |  \n",
+      " |  ??? info \"Overview of available data\"\n",
+      " |      Below is a table of the available folders and files on the [QIM data repository](https://data.qim.dk/).\n",
+      " |  \n",
+      " |      Folder name         | File name                                                                                                          | File size\n",
+      " |      ------------------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------\n",
+      " |      `Coal`              | `CoalBrikett` <br> `CoalBrikett_Zoom` <br> `CoalBrikettZoom_DOWNSAMPLED`                                           | 2.23 GB <br> 3.72 GB <br> 238 MB\n",
+      " |      `Corals`            | `Coral_1` <br> `Coral_2` <br> `Coral2_DOWNSAMPLED` <br> `MexCoral`                                                 | 2.26 GB <br> 2.38 GB <br> 162 MB <br> 2.23 GB\n",
+      " |      `Cowry_Shell`       | `Cowry_Shell` <br> `Cowry_DOWNSAMPLED`                                                                             | 1.82 GB <br> 116 MB\n",
+      " |      `Crab`              | `HerrmitCrab` <br> `OkinawaCrab`                                                                                   | 2.38 GB <br> 1.86 GB\n",
+      " |      `Deer_Mandible`     | `Animal_Mandible` <br> `DeerMandible_DOWNSAMPLED` <br>                                                             | 2.79 GB <br> 638 MB\n",
+      " |      `Foam`              | `Foam` <br> `Foam_DOWNSAMPLED` <br> `Foam_2` <br> `Foam_2_zoom`                                                    | 3.72 GB <br> 238 MB <br> 3.72 GB <br> 3.72 GB\n",
+      " |      `Hourglass`         | `Hourglass` <br> `Hourglass_4X_80kV_Air_9s_1_97um` <br> `Hourglass_longexp_rerun`                                  | 3.72 GB <br> 1.83 GB <br> 3.72 GB\n",
+      " |      `Kiwi`              | `Kiwi`                                                                                                             | 2.86 GB\n",
+      " |      `Loofah`            | `Loofah` <br> `Loofah_DOWNSAMPLED`                                                                                 | 2.23 GB <br> 143 MB\n",
+      " |      `Marine_Gastropods` | `MarineGatropod_1` <br> `MarineGastropod1_DOWNSAMPLED` <br> `MarineGatropod_2` <br> `MarineGastropod2_DOWNSAMPLED` | 2.23 GB <br> 143 MB <br> 2.60 GB <br> 166 MB\n",
+      " |      `Mussel`            | `ClosedMussel1` <br> `ClosedMussel1_DOWNSAMPLED`                                                                   | 2.23 GB <br> 143 MB\n",
+      " |      `Oak_Branch`        | `Oak_branch` <br> `OakBranch_DOWNSAMPLED`                                                                          | 2.38 GB <br> 152 MB\n",
+      " |      `Okinawa_Forams`    | `Okinawa_Foram_1` <br> `Okinawa_Foram_2`                                                                           | 1.84 GB <br> 1.84 GB\n",
+      " |      `Physalis`          | `Physalis` <br> `Physalis_DOWNSAMPLED`                                                                             | 3.72 GB <br> 238 MB\n",
+      " |      `Raspberry`         | `Raspberry2` <br> `Raspberry2_DOWNSAMPLED`                                                                         | 2.97 GB <br> 190 MB\n",
+      " |      `Rope`              | `FibreRope1` <br> `FibreRope1_DOWNSAMPLED`                                                                         | 1.82 GB <br> 686 MB\n",
+      " |      `Sea_Urchin`        | `SeaUrchin` <br> `Cordatum_Shell` <br> `Cordatum_Spine`                                                            | 2.60 GB <br> 1.85 GB <br> 183 MB\n",
+      " |      `Snail`             | `Escargot`                                                                                                         | 2.60 GB\n",
+      " |      `Sponge`            | `Sponge`                                                                                                           | 1.11 GB\n",
+      " |  \n",
+      " |  Example:\n",
+      " |      ```python\n",
+      " |      import qim3d\n",
+      " |      \n",
+      " |      downloader = qim3d.io.Downloader()\n",
+      " |      data = downloader.Cowry_Shell.Cowry_DOWNSAMPLED(load_file=True)\n",
+      " |  \n",
+      " |      qim3d.viz.slicer_orthogonal(data, color_map=\"magma\")\n",
+      " |      ```\n",
+      " |      ![cowry shell](assets/screenshots/cowry_shell_slicer.gif)\n",
+      " |  \n",
+      " |  Methods defined here:\n",
+      " |  \n",
+      " |  __init__(self)\n",
+      " |      Initialize self.  See help(type(self)) for accurate signature.\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Data descriptors defined here:\n",
+      " |  \n",
+      " |  __dict__\n",
+      " |      dictionary for instance variables\n",
+      " |  \n",
+      " |  __weakref__\n",
+      " |      list of weak references to the object\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "import qim3d\n",
+    "\n",
+    "downloader = qim3d.io.Downloader()\n",
+    "help(downloader)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "After deciding to use the coral data, we fetch the data. We use the 'load_file' argument to load it while downloading it. Additionally, we can use the vizualisation tool `slicer_orthogonal` too explore the volume from three different axes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 50,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "File already downloaded:\n",
+      "/home/s214735/qim3d/docs/notebooks/Corals/Coral2_DOWNSAMPLED.tif\n",
+      "\n",
+      "Loading Coral2_DOWNSAMPLED.tif\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "0745beff9bb54c64bf716f73168629f2",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Loading:   0%|          | 0.00B/153MB  [00:00<?, ?B/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Loaded shape: (500, 400, 400)\n",
+      "Using virtual stack\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "7179fe74113a4a98a2870334e1216b41",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "HBox(children=(interactive(children=(IntSlider(value=250, description='Z', max=499), Output()), layout=Layout(…"
+      ]
+     },
+     "execution_count": 50,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "vol = downloader.Corals.Coral2_DOWNSAMPLED(load_file=True)\n",
+    "\n",
+    "qim3d.viz.slicer_orthogonal(vol, color_map=\"magma\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Attempting to visualize the volume in a three-dimensional space will require a lot of computing power due to the size of the volume. However using the OME-Zarr data type, we can visualize it in chunks. Additionally we can save it in the .zarr format.\n",
+    "\n",
+    "First we export the file using the `export_ome_zarr` method."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 81,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Exporting data to OME-Zarr format at coral.zarr\n",
+      "Number of scales: 3\n",
+      "Calculating the multi-scale pyramid\n",
+      "- Scale 0: (500, 400, 400)\n",
+      "- Scale 1: (250, 200, 200)\n",
+      "- Scale 2: (125, 100, 100)\n",
+      "Writing data to disk\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "2196a3a7db3c4b059d86a9e050608ad2",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Saving:   0%|          | 0.00/166 [00:00<?, ?Chunks/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "All done!\n"
+     ]
+    }
+   ],
+   "source": [
+    "qim3d.io.export_ome_zarr(\n",
+    "    'coral.zarr',\n",
+    "    vol,\n",
+    "    chunk_size=200,\n",
+    "    downsample_rate=2,\n",
+    "    replace=True)"
+   ]
+  },
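+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The export produces a directory-based store on disk, with one array per scale of the multi-scale pyramid. As a small sketch (assuming the `zarr` package is installed and that the scale arrays live at the root of the store, following the OME-Zarr multiscale layout), we can inspect the shapes and chunking directly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import zarr\n",
+    "\n",
+    "store = zarr.open('coral.zarr', mode='r')\n",
+    "for name, array in store.arrays():\n",
+    "    print(f\"Scale {name}: shape={array.shape}, chunks={array.chunks}\")"
+   ]
+  },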
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can then use the `chunks` method to visualize the chunks from the volume. Here we have both options for exploring the volume slices or the entire 3D object."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 52,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "2af896bb7a0b453e9de1cc7767265dae",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "VBox(children=(HTML(value='<h2>Chunk Explorer</h2>'), HBox(children=(VBox(children=(Dropdown(description='OME-…"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "qim3d.viz.chunks('coral.zarr')"
+   ]
+  },
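+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Because the data is stored in chunks, other tools can read small regions of the volume without loading the whole file into memory. As a minimal sketch (assuming the `dask` package is installed and that the full-resolution scale is the component named '0', as in the OME-Zarr multiscale convention), a sub-volume can be read lazily like this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import dask.array as da\n",
+    "\n",
+    "# Open the full-resolution scale lazily; no voxel data is read yet\n",
+    "lazy_vol = da.from_zarr('coral.zarr', component='0')\n",
+    "\n",
+    "# Only the chunks overlapping this region are read from disk\n",
+    "sub_volume = lazy_vol[200:250, 100:200, 100:200].compute()\n",
+    "print(lazy_vol.shape, sub_volume.shape)"
+   ]
+  },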
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "After looking at the object, we see that some of it is obstructed by background noise. Therefore we attempt to remove that with a threshold, followed by another export and visualization of the volume:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 82,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Exporting data to OME-Zarr format at coral_threshold.zarr\n",
+      "Number of scales: 3\n",
+      "Calculating the multi-scale pyramid\n",
+      "- Scale 0: (500, 400, 400)\n",
+      "- Scale 1: (250, 200, 200)\n",
+      "- Scale 2: (125, 100, 100)\n",
+      "Writing data to disk\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "756f4bb77ec140bd85d3986e20b7942a",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Saving:   0%|          | 0.00/83.0 [00:00<?, ?Chunks/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "All done!\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "2200dadf1af24cf89fa60d9eefd6c8b2",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "VBox(children=(HTML(value='<h2>Chunk Explorer</h2>'), HBox(children=(VBox(children=(Dropdown(description='OME-…"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "vol_t = vol > 30000\n",
+    "\n",
+    "qim3d.io.export_ome_zarr(\n",
+    "    'coral_threshold.zarr',\n",
+    "    vol_t,\n",
+    "    chunk_size=200,\n",
+    "    downsample_rate=2,\n",
+    "    replace=True)\n",
+    "\n",
+    "qim3d.viz.chunks('coral_threshold.zarr')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For next time the volume is to be used, we can import it easily using the `import_ome_zarr` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 83,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Data contains 3 scales:\n",
+      "- Scale 0: (500, 400, 400)\n",
+      "- Scale 1: (250, 200, 200)\n",
+      "- Scale 2: (125, 100, 100)\n",
+      "\n",
+      "Loading scale 0 with shape (500, 400, 400)\n"
+     ]
+    }
+   ],
+   "source": [
+    "vol_t = qim3d.io.import_ome_zarr('coral_threshold.zarr')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "qim3d-env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/notebooks/segmentation.ipynb b/docs/notebooks/segmentation.ipynb
new file mode 100644
index 00000000..920d62a2
--- /dev/null
+++ b/docs/notebooks/segmentation.ipynb
@@ -0,0 +1,233 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Segmentation notebook\n",
+    "\n",
+    "In this notebook we explore some of the segmentation tools the `qim3d` library provides.\n",
+    "\n",
+    "This example contains an image of air bubbles in a 3d volume representing a cutout of cement. The goal of the example is to detect the amount of holes and visualize the largest of them.\n",
+    "\n",
+    "First we load the image from the `qim3d.examples` module:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "5c2f4fac066740eca21a30d0a9cb4b36",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "interactive(children=(IntSlider(value=64, description='Slice', max=127), Output()), layout=Layout(align_items=…"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import qim3d\n",
+    "vol = qim3d.examples.cement_128x128x128\n",
+    "qim3d.viz.slicer(vol)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we binarize the image by applying a threshold. We visualize once again to confirm the threshold is not too high or low:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "878a63fbbd804ee290a4f988c8d4e971",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "interactive(children=(IntSlider(value=64, description='Slice', max=127), Output()), layout=Layout(align_items=…"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "vol_t = vol < 60\n",
+    "\n",
+    "qim3d.viz.slicer(vol_t)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now the image can be segmented using the `get_3d_cc` method. This method calculates the connected components of a volume and returns a custom CC class. This class contains the `get_cc` method, that returns the segmented volume."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Total number of connected components found: 1845\n"
+     ]
+    }
+   ],
+   "source": [
+    "cc = qim3d.segmentation.get_3d_cc(vol_t)\n",
+    "vol_cc = cc.get_cc()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We see that 1845 connectec components were wound. However many of these are very small, and we would only like to find the largest ones. Therefore, we create a for-loop over the connected components, and remove any that are smaller than 300 voxels:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for i in range(1, len(cc)):\n",
+    "    if (vol_cc==i).sum() < 300:\n",
+    "        vol_cc[vol_cc == i] = 0\n",
+    "\n",
+    "vol_cc = vol_cc > 100"
+   ]
+  },
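+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a side note, the same size filtering can be done without an explicit Python loop by counting the voxels per label in a single pass. The sketch below is only illustrative: it starts again from the integer labels returned by `cc.get_cc()` and writes the result to a new variable, so the pipeline above is unchanged:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "labels = cc.get_cc()                         # integer label volume (background assumed to be 0)\n",
+    "counts = np.bincount(labels.ravel())         # voxel count per label\n",
+    "small_labels = np.flatnonzero(counts < 300)  # labels with fewer than 300 voxels\n",
+    "vol_cc_filtered = np.where(np.isin(labels, small_labels), 0, labels)"
+   ]
+  },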
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can visualize it once again to confirm our operations have worked as intended:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "386452590d3b44fb9f1a9e8d6332453f",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "interactive(children=(IntSlider(value=64, description='Slice', max=127), Output()), layout=Layout(align_items=…"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "qim3d.viz.slicer(vol_cc)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In order to enable better visual distinction between the airbubbles, we can use the `watershed` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Total number of objects found: 210\n"
+     ]
+    }
+   ],
+   "source": [
+    "vol_label, num_labels = qim3d.segmentation.watershed(vol_cc, min_distance=5)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make different color for every airbubble, we create our own colormap using the `viz.colormaps.segmentation` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "da40a0a00349401ca5d141b83e0bac2b",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "interactive(children=(IntSlider(value=64, description='Slice', max=127), Output()), layout=Layout(align_items=…"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "color_map = qim3d.viz.colormaps.segmentation(num_labels)\n",
+    "qim3d.viz.slicer(vol_label, color_map=color_map)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "qim3d-env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/qim3d/features/_common_features_methods.py b/qim3d/features/_common_features_methods.py
index d4ceec7e..3e3dd46d 100644
--- a/qim3d/features/_common_features_methods.py
+++ b/qim3d/features/_common_features_methods.py
@@ -5,7 +5,7 @@ import trimesh
 import qim3d
 
 
-def volume(obj, **mesh_kwargs) -> float:
+def volume(obj, logs: bool = True, **mesh_kwargs) -> float:
     """
     Compute the volume of a 3D volume or mesh.
 
@@ -45,13 +45,14 @@ def volume(obj, **mesh_kwargs) -> float:
 
     """
     if isinstance(obj, np.ndarray):
-        log.info("Converting volume to mesh.")
-        obj = qim3d.mesh.from_volume(obj, **mesh_kwargs)
+        if logs:
+            log.info("Converting volume to mesh.")
+        obj = qim3d.mesh.from_volume(obj, logs=logs, **mesh_kwargs)
 
     return obj.volume
 
 
-def area(obj, **mesh_kwargs) -> float:
+def area(obj, logs: bool = True, **mesh_kwargs) -> float:
     """
     Compute the surface area of a 3D volume or mesh.
 
@@ -91,14 +92,14 @@ def area(obj, **mesh_kwargs) -> float:
         ```
     """
     if isinstance(obj, np.ndarray):
-        log.info("Converting volume to mesh.")
-        obj = qim3d.mesh.from_volume(obj, **mesh_kwargs)
-        obj = qim3d.mesh.from_volume(obj, **mesh_kwargs)
+        if logs:
+            log.info("Converting volume to mesh.")
+        obj = qim3d.mesh.from_volume(obj, logs=logs, **mesh_kwargs)
 
     return obj.area
 
 
-def sphericity(obj, **mesh_kwargs) -> float:
+def sphericity(obj, logs: bool = True, **mesh_kwargs) -> float:
     """
     Compute the sphericity of a 3D volume or mesh.
 
@@ -145,19 +146,16 @@ def sphericity(obj, **mesh_kwargs) -> float:
         Higher resolution meshes may mitigate these errors but often at the cost of increased computational demands.
     """
     if isinstance(obj, np.ndarray):
-        log.info("Converting volume to mesh.")
-        obj = qim3d.mesh.from_volume(obj, **mesh_kwargs)
-        obj = qim3d.mesh.from_volume(obj, **mesh_kwargs)
+        if logs:
+            log.info("Converting volume to mesh.")
+        obj = qim3d.mesh.from_volume(obj, logs=logs, **mesh_kwargs)
 
-    volume = qim3d.features.volume(obj)
-    area = qim3d.features.area(obj)
-    volume = qim3d.features.volume(obj)
-    area = qim3d.features.area(obj)
+    volume = qim3d.features.volume(obj, logs=logs)
+    area = qim3d.features.area(obj, logs=logs)
 
     if area == 0:
         log.warning("Surface area is zero, sphericity is undefined.")
         return np.nan
 
     sphericity = (np.pi ** (1 / 3) * (6 * volume) ** (2 / 3)) / area
-    log.info(f"Sphericity: {sphericity}")
     return sphericity
diff --git a/qim3d/mesh/_common_mesh_methods.py b/qim3d/mesh/_common_mesh_methods.py
index e3746efe..51500b76 100644
--- a/qim3d/mesh/_common_mesh_methods.py
+++ b/qim3d/mesh/_common_mesh_methods.py
@@ -11,6 +11,7 @@ def from_volume(
     step_size=1,
     allow_degenerate=False,
     padding: Tuple[int, int, int] = (2, 2, 2),
+    logs: bool = True,
     **kwargs: Any,
 ) -> trimesh.Trimesh:
     """
@@ -50,7 +51,8 @@ def from_volume(
     # Compute the threshold level if not provided
     if level is None:
         level = filters.threshold_otsu(volume)
-        log.info(f"Computed level using Otsu's method: {level}")
+        if logs:
+            log.info(f"Computed level using Otsu's method: {level}")
 
     # Apply padding to the volume
     if padding is not None:
@@ -62,7 +64,8 @@ def from_volume(
             mode="constant",
             constant_values=padding_value,
         )
-        log.info(f"Padded volume with {padding} to shape: {volume.shape}")
+        if logs:
+            log.info(f"Padded volume with {padding} to shape: {volume.shape}")
 
     # Call skimage.measure.marching_cubes with user-provided kwargs
     verts, faces, normals, values = measure.marching_cubes(
-- 
GitLab