Skip to content

Commit

Permalink
Add template methods, add export to png firstdataset
Browse files Browse the repository at this point in the history
  • Loading branch information
Skwarson96 committed Feb 4, 2021
1 parent b468bb8 commit ba11e8f
Showing 1 changed file with 192 additions and 6 deletions.
198 changes: 192 additions & 6 deletions skull_stripping.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
"colab": {
"name": "skull_stripping",
"provenance": [],
"authorship_tag": "ABX9TyOA6MdVB0UFltJekdeTqM4l"
"authorship_tag": "ABX9TyOdh6X0fBcqMUIk2ylZOuwS"
},
"kernelspec": {
"name": "python3",
Expand Down Expand Up @@ -64,7 +64,7 @@
"!mkdir /content/FirstDataset/valid\n",
"!mkdir /content/SecondDataset/valid"
],
"execution_count": null,
"execution_count": 3,
"outputs": []
},
{
Expand All @@ -79,7 +79,11 @@
{
"cell_type": "code",
"metadata": {
"id": "GNW-oJqM7oVa"
"id": "GNW-oJqM7oVa",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "0a59f336-d638-443a-affe-9c9c4e8d699d"
},
"source": [
"import os\n",
Expand All @@ -101,8 +105,23 @@
"print('test', len(os.listdir('/content/SecondDataset/test')))\n",
"print(int(len(filenames2)/5)) # 20"
],
"execution_count": null,
"outputs": []
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"text": [
"FirstDataset\n",
"train 574\n",
"test 72\n",
"114\n",
"SecondDataset\n",
"train 100\n",
"test 25\n",
"20\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
Expand All @@ -116,7 +135,11 @@
{
"cell_type": "code",
"metadata": {
"id": "YbG-rMgq726q"
"id": "YbG-rMgq726q",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "048f1325-fe0c-497c-d223-9ba58adb2894"
},
"source": [
"import os\n",
Expand Down Expand Up @@ -150,6 +173,169 @@
"print('test len', len(os.listdir('/content/SecondDataset/test')))\n",
"print('finish 2')"
],
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"text": [
"train len: 460\n",
"valid len: 114\n",
"test len 72\n",
"finish 1\n",
"train len: 80\n",
"valid len: 20\n",
"test len 25\n",
"finish 2\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "fDPrcVQzy34B"
},
"source": [
"### Export to png"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "w2Qg0vUbzbja"
},
"source": [
"Template methods"
]
},
{
"cell_type": "code",
"metadata": {
"id": "grIXGjAEza4Q"
},
"source": [
"import numpy as np\n",
"import nibabel as nib\n",
"# plt is needed by show_slices below; importing it here keeps this cell\n",
"# self-contained instead of relying on a later cell's import.\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from typing import Tuple, List\n",
"from pathlib import Path\n",
"\n",
"\n",
"def load_raw_volume(path: Path) -> Tuple[np.ndarray, np.ndarray]:\n",
"    \"\"\"Load a NIfTI volume in canonical orientation.\n",
"\n",
"    Returns the voxel data as float32 together with the image's affine.\n",
"    \"\"\"\n",
"    data: nib.Nifti1Image = nib.load(str(path))\n",
"    # Reorient to the closest canonical (RAS) orientation so all scans\n",
"    # are indexed consistently regardless of how they were acquired.\n",
"    data = nib.as_closest_canonical(data)\n",
"    raw_data = data.get_fdata(caching='unchanged', dtype=np.float32)\n",
"    return raw_data, data.affine\n",
"\n",
"\n",
"def load_labels_volume(path: Path) -> np.ndarray:\n",
"    \"\"\"Load a segmentation-mask volume as uint8 labels.\"\"\"\n",
"    return load_raw_volume(path)[0].astype(np.uint8)\n",
"\n",
"\n",
"def save_labels(data: np.ndarray, affine: np.ndarray, path: Path):\n",
"    \"\"\"Save a labels volume as a NIfTI file using the given affine.\"\"\"\n",
"    nib.save(nib.Nifti1Image(data, affine), str(path))\n",
"\n",
"\n",
"def show_slices(slices: List[np.ndarray]):\n",
"    \"\"\"Display a list of 2D slices side by side in grayscale.\"\"\"\n",
"    fig, axes = plt.subplots(1, len(slices))\n",
"    for i, data_slice in enumerate(slices):\n",
"        axes[i].imshow(data_slice.T, cmap=\"gray\", origin=\"lower\")"
],
"execution_count": 6,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "YNh14hLCy6iW"
},
"source": [
"Create dirs for images"
]
},
{
"cell_type": "code",
"metadata": {
"id": "WPEbLLXBy3qQ"
},
"source": [
"# Rebuild the png export tree from scratch so re-running is clean.\n",
"!rm -rf FirstDataset_images\n",
"\n",
"# mkdir creates its operands left to right, so every parent directory\n",
"# is listed before its children.\n",
"!mkdir /content/FirstDataset_images /content/FirstDataset_images/train /content/FirstDataset_images/valid\n",
"!mkdir /content/FirstDataset_images/train/images /content/FirstDataset_images/train/masks\n",
"!mkdir /content/FirstDataset_images/valid/images /content/FirstDataset_images/valid/masks"
],
"execution_count": 47,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "t6AGYKrR4c6i"
},
"source": [
"Export to png"
]
},
{
"cell_type": "code",
"metadata": {
"id": "ZKfmAV5P0Cdh"
},
"source": [
"import matplotlib.pyplot as plt\n",
"import cv2\n",
"\n",
"\n",
"def export_split_to_png(split: str):\n",
"    \"\"\"Export every slice of each volume in FirstDataset/<split> to png.\n",
"\n",
"    Mask volumes (file names ending in 'mask.nii.gz') are written to\n",
"    FirstDataset_images/<split>/masks, all other scans to .../images.\n",
"    Slices are taken along the first axis; each png is named\n",
"    <file name>_<slice index>_.png.\n",
"    \"\"\"\n",
"    dataset_path = Path(f'/content/FirstDataset/{split}')\n",
"    out_root = Path(f'/content/FirstDataset_images/{split}')\n",
"    for scan_path in dataset_path.iterdir():\n",
"        print(scan_path)\n",
"        # Load each file exactly once, in the representation it needs\n",
"        # (the original loaded every volume twice, as float *and* uint8).\n",
"        if scan_path.name.endswith('mask.nii.gz'):\n",
"            volume = load_labels_volume(scan_path)\n",
"            out_dir = out_root / 'masks'\n",
"        else:\n",
"            volume, affine = load_raw_volume(scan_path)\n",
"            out_dir = out_root / 'images'\n",
"        for idx in range(volume.shape[0]):\n",
"            plt.imsave(str(out_dir / f'{scan_path.name}_{idx}_.png'), volume[idx])\n",
"\n",
"\n",
"export_split_to_png('train')\n",
"print(\"TRAIN DATASET FINISHED\")\n",
"\n",
"export_split_to_png('valid')\n",
"print(\"VALID DATASET FINISHED\")\n",
"\n",
"# Kept with their original final values in case later cells read them.\n",
"first_dataset_path = Path('/content/FirstDataset/valid')\n",
"first_image_dataset_path = Path('/content/FirstDataset_images/train/images')\n",
"first_mask_dataset_path = Path('/content/FirstDataset_images/train/masks')\n",
"first_image_dataset_path_valid = Path('/content/FirstDataset_images/valid/images')\n",
"first_mask_dataset_path_valid = Path('/content/FirstDataset_images/valid/masks')\n"
],
"execution_count": null,
"outputs": []
}
Expand Down

0 comments on commit ba11e8f

Please sign in to comment.