diff --git a/notebooks/Neuroglancer.ipynb b/notebooks/Neuroglancer.ipynb index 58b2d31..95205da 100644 --- a/notebooks/Neuroglancer.ipynb +++ b/notebooks/Neuroglancer.ipynb @@ -3,21 +3,16 @@ { "cell_type": "code", "execution_count": null, - "id": "79009c5d-a064-4b31-802a-f16fe5b64c20", - "metadata": {}, + "id": "dc8e259402bd08ed", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ - "import neuroglancer\n", "import neuroglancer.static_file_server\n", - "import numpy as np\n", - "import SimpleITK as sitk\n", - "import xml.etree.ElementTree as ET\n", + "from pytools.ng.viz import add_zarr_image, add_roi_annotations\n", "from pathlib import Path\n", - "from pytools import HedwigZarrImages\n", - "\n", - "from dask.distributed import LocalCluster\n", - "cluster = LocalCluster()\n", - "client = cluster.get_client()" + "from dask.distributed import LocalCluster\n" ] }, { @@ -27,276 +22,32 @@ "metadata": {}, "outputs": [], "source": [ + "\n", + "cluster = LocalCluster()\n", + "client = cluster.get_client()\n", + "\n", "file_dir = Path(\"/Users/blowekamp/scratch/hedwig/TestData/Nanostringfiles/ROI Alignment Images for Brad/\")\n", "server = neuroglancer.static_file_server.StaticFileServer(\n", " static_dir=file_dir, bind_address=\"localhost\", daemon=True\n", " )\n", - "viewer = neuroglancer.Viewer()\n", - "\n", - "shader_parameter_cache = {}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fd5e8627-945d-452a-bc69-ea33dbc64d61", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "def homogeneous_identity(N) -> np.array: \n", - " return np.identity(N+1)[:N,:]\n", - "\n", - "\n", - "def sitk_offset_from_transform( tx: sitk.Transform ) : \n", - " return tx.TransformPoint((0,)*tx.GetDimension())\n", - "\n", - "def sitk_transform_to_ng_transform( tx: sitk.Transform ):\n", - " \n", - " tx = tx.GetInverse()\n", - " tx_matrix = np.array(tx.GetMatrix()).reshape(tx.GetDimension(),tx.GetDimension())\n", - "\n", - " M = homogeneous_identity(5)\n", - " M[4,4] = tx_matrix[0,0]\n", - " M[3,4] = tx_matrix[1,0]\n", - " M[4,3] = tx_matrix[0,1]\n", - " M[3,3] = tx_matrix[1,1] \n", - "\n", - " tx_translation = sitk_offset_from_transform(tx)\n", - " M[4,5] = (tx_translation[0])*1e3\n", - " M[3,5] = (tx_translation[1])*1e3\n", - "\n", - " output_dimensions = neuroglancer.CoordinateSpace( names=[\"t'\", \"c^\", \"z\", \"y\", \"x\"], units =[\"\", \"\", \"nm\",\"nm\", \"nm\"], scales=[1,1,1,1,1])\n", - " ng_transform = neuroglancer.CoordinateSpaceTransform(output_dimensions=output_dimensions,\n", - " matrix = M)\n", - " return ng_transform\n", - "\n", - "def add_zarr_image(viewer_txn, zarr_filename, transform_filename=None ):\n", - " \n", - " zarr_root = Path(zarr_filename).parent\n", - " zarr_key = int(Path(zarr_filename).name)\n", - " \n", - " layername = f\"{zarr_root.name}/{zarr_key}\"\n", - " output_dimensions = neuroglancer.CoordinateSpace( names=[\"t'\", \"c^\", \"z\", \"y\", \"x\"], units =[\"\", \"\", \"mm\",\"mm\", \"mm\"], scales=[1,1,1,1,1])\n", - " \n", - " if transform_filename:\n", - " tx = sitk.ReadTransform(transform_filename)\n", - " ng_transform = sitk_transform_to_ng_transform(tx)\n", - " else:\n", - " M = homogeneous_identity(5)\n", - " ng_transform = neuroglancer.CoordinateSpaceTransform(output_dimensions=output_dimensions,\n", - " matrix = M)\n", - " \n", - " viewer_txn.layers[layername] = neuroglancer.ImageLayer(\n", - " source=neuroglancer.LayerDataSource(f\"zarr://{server.url}/{zarr_filename}\",\n", - " transform=ng_transform),\n", - " 
shader=generate_ng_shader(file_dir/zarr_root, zarr_key),\n", - " )\n", - " \n", - "\n", - "def add_roi_annotations(viewer_txn, ome_xml_filename, *, layername=\"roi annotation\", reference_zarr=None):\n", - "\n", - " scales= [398, 396]\n", - " if reference_zarr:\n", - " \n", - " zarr_root = Path(reference_zarr).parent\n", - " zarr_key = int(Path(reference_zarr).name)\n", - " hwz_images = HedwigZarrImages(zarr_root)\n", - " hwz_image = hwz_images[list(hwz_images.get_series_keys())[zarr_key]]\n", - " spacing_tczyx = hwz_image.spacing\n", - " # select X and Y scales and convert to nm from um\n", - " scales = [s*1e3 for s in spacing_tczyx[:2:-1]]\n", - " \n", - "\n", - " xml_path = Path(ome_xml_filename)\n", - " \n", - " ns = {\"OME\": \"http://www.openmicroscopy.org/Schemas/OME/2016-06\"}\n", - " with open(xml_path, \"r\") as fp:\n", - " data = fp.read()\n", - " xml_root = ET.fromstring(data)\n", - "\n", - " layer = neuroglancer.LocalAnnotationLayer(\n", - " dimensions=neuroglancer.CoordinateSpace(\n", - " names=[\"x\", \"y\"],\n", - " units=\"nm\",\n", - " scales=scales,\n", - " ),\n", - " \n", - " )\n", - " \n", - " viewer_txn.layers[layername]=layer\n", - " \n", - "\n", - "\n", - " # Coordinates are in the space of the original input image. The dimensions/CooridinateSpace map the index space to physical space.\n", - " for roi in xml_root.iterfind(\"OME:ROI\", ns):\n", - " for r in roi.iterfind(\"./OME:Union/OME:Rectangle\", ns):\n", - " height = float(r.attrib[\"Height\"])\n", - " width = float(r.attrib[\"Width\"])\n", - " x = float(r.attrib[\"X\"])\n", - " y = float(r.attrib[\"Y\"])\n", - " \n", - " a = (x, y)\n", - " b = (x+width, y+height)\n", - " for l in roi.iterfind(\"./OME:Union/OME:Label\", ns):\n", - " text = l.attrib[\"Text\"]\n", - " \n", - " print(a,b)\n", - " layer.annotations.append(\n", - " neuroglancer.AxisAlignedBoundingBoxAnnotation(\n", - " description=text,\n", - " id=neuroglancer.random_token.make_random_token(),\n", - " point_a=a,\n", - " point_b=b\n", - " )\n", - " ) " + "viewer = neuroglancer.Viewer()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "3583fa67-80dd-4e72-b802-db331c3ba81e", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "def generate_ng_shader(path, key):\n", - " global shader_parameter_cache\n", - " \n", - " if (path, key) not in shader_parameter_cache:\n", - " hwz_images = HedwigZarrImages(path)\n", - " hwz_image = hwz_images[list(hwz_images.get_series_keys())[key]]\n", - " shader_parameter_cache[(path, key)] = hwz_image.neuroglancer_shader_parameters(middle_quantile=[0.01, 0.999])\n", - " \n", - " rgb_shader_code=\"\"\"\n", - "void main() {\n", - " emitRGB(vec3(toNormalized(getDataValue(0)),\n", - " toNormalized(getDataValue(1)),\n", - " toNormalized(getDataValue(2))));\n", - "}\n", - "\"\"\"\n", - " \n", - " if hwz_image.shader_type == \"RGB\":\n", - " return rgb_shader_code \n", - " \n", - " params = shader_parameter_cache[(path, key)]\n", - " \n", - " \n", - " shader_code =f\"\"\"\n", - "#uicontrol float brightness slider(default={params[\"brightness\"]}, min=-1, max=1, step=0.1)\n", - "#uicontrol float contrast slider(default={params[\"contrast\"]}, min=-3, max=3, step=0.1)\n", - "\"\"\"\n", - "\n", - "\n", - " for channel in params[\"channelArray\"]:\n", - " channel_idx = channel[\"channel\"]\n", - " shader_code += f\"\"\"\n", - "#uicontrol bool {channel[\"name\"]} checkbox(default=true)\n", - "#uicontrol vec3 color{channel_idx} color(default=\"{channel[\"color\"]}\")\n", - "#uicontrol invlerp 
invlerp{channel_idx}(range=[{channel[\"range\"][0]}, {channel[\"range\"][1]}], window=[{channel[\"window\"][0]}, {channel[\"window\"][1]}], channel={channel_idx}, clamp=true)\n", - "\"\"\"\n", - " shader_code += \"\"\"\n", - "void main() {\n", - " vec3 cum = vec3(0., 0., 0.);\n", - " \"\"\"\n", - " for channel in params[\"channelArray\"]:\n", - " channel_idx = channel[\"channel\"]\n", - " shader_code += f\"\"\"\n", - " if ({channel[\"name\"]})\n", - " {{\n", - " cum += color{channel_idx} * invlerp{channel_idx}(getDataValue({channel_idx}));\n", - " }}\n", - " \"\"\"\n", - " \n", - " shader_code += \"\"\"\n", - " emitRGB((cum+brightness)*exp(contrast));\n", - " }\n", - " \"\"\"\n", - " return shader_code \n", - " " - ] - }, - { - "cell_type": "code", - "outputs": [], - "source": [ - "\n", - "def add_sitk_image(img:sitk.Image, name=\"image\", transform=None, shader_code=None):\n", - " \n", - " img = img.ToScalarImage(inPlace=False)\n", - " assert(img.GetDimension()==3)\n", - "\n", - " dimensions=neuroglancer.CoordinateSpace(\n", - " names=['y', 'x', 'c^'],\n", - " units=[\"nm\", \"nm\", \"\"],\n", - " scales=img.GetSpacing()[::-1])\n", - " \n", - "\n", - " ng_transform = None\n", - " M = homogeneous_identity(3)\n", - " if transform:\n", - " print(transform.GetName())\n", - " if transform.GetName() == \"TranslationTransform\":\n", - " transform = sitk.AffineTransform(np.identity(2, dtype=float).flatten(), transform.GetOffset())\n", - " \n", - " tx_matrix = np.array(transform.GetMatrix()).reshape(transform.GetDimension(),transform.GetDimension())\n", - "\n", - " M[1,1] = tx_matrix[0,0]\n", - " M[0,1] = tx_matrix[1,0]\n", - " M[1,0] = tx_matrix[0,1]\n", - " M[0,0] = tx_matrix[1,1] \n", - " \n", - " tx_translation = sitk_offset_from_transform(transform)\n", - " tx_translation = [t/s for t,s in zip(tx_translation, img.GetSpacing()[1:])]\n", - " #M[0,3] = tx_translation[1]\n", - " #M[1,3] = tx_translation[0]\n", - " \n", - " print(tx_translation)\n", - " \n", - " ng_transform = neuroglancer.CoordinateSpaceTransform(output_dimensions=dimensions,\n", - " matrix = M)\n", - "\n", - " volume = neuroglancer.LocalVolume( sitk.GetArrayViewFromImage(img), dimensions=dimensions )\n", - " \n", - " \n", - " with viewer.txn() as s:\n", - " layer = neuroglancer.ImageLayer(\n", - " source=neuroglancer.LayerDataSource(volume, transform=ng_transform),\n", - " shader=shader_code,\n", - " )\n", - " s.layers[name] = layer" - ], - "metadata": {}, - "id": "c9e19791-5228-416c-9205-5a29fdf37106", - "execution_count": null - }, - { - "cell_type": "code", - "outputs": [], - "source": [ - "shader_parameter_cache = {}" - ], - "metadata": { - "collapsed": false - }, - "id": "b2ba96c94a793418", - "execution_count": null - }, { "cell_type": "code", "outputs": [], "source": [ "with viewer.txn() as s:\n", - " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"nm\",\"nm\", \"nm\"], scales=[1,1,1])\n", + " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"µm\",\"µm\", \"µm\"], scales=[1,1,1])\n", " s.dimensions = dimensions\n", " s.layout = neuroglancer.DataPanelLayout(\"xy\")\n", " \n", " s.layers.clear()\n", "\n", - " add_zarr_image(s, \"IA_P2_S1.ome.zarr/0\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S1.ome.zarr/0\", server_url=server.url)\n", " \n", - " add_zarr_image(s, \"IA_P2_S4.zarr/0\", file_dir/\"IA_P2_S4_0_to_roi.txt\")\n", - " add_zarr_image(s, \"IA_P2_S4.zarr/1\", file_dir/\"IA_P2_S4_1_to_roi.txt\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S4.zarr/0\", 
server.url, file_dir/\"IA_P2_S4_0_to_roi.txt\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S4.zarr/1\", server.url, file_dir/\"IA_P2_S4_1_to_roi.txt\")\n", " \n", " add_roi_annotations(s, Path(file_dir)/ \"IA_P2_S1.ome.zarr/OME/METADATA.ome.xml\",\n", " layername=\"roi annotation\",\n", @@ -311,101 +62,76 @@ }, { "cell_type": "code", + "execution_count": null, + "id": "5a96d34fc488b9a6", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "with viewer.txn() as s:\n", - " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"nm\",\"nm\", \"nm\"], scales=[1,1,1])\n", + " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"µm\",\"µm\", \"µm\"], scales=[1,1,1])\n", " s.dimensions = dimensions\n", " s.layout = neuroglancer.DataPanelLayout(\"xy\")\n", "\n", " s.layers.clear()\n", "\n", - " add_zarr_image(s, \"IA_P2_S2.ome.zarr/0\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S2.ome.zarr/0\", server_url=server.url)\n", " \n", - " add_zarr_image(s, \"IA_P2_S2.zarr/0\", file_dir/\"IA_P2_S2_0_to_roi.txt\")\n", - " add_zarr_image(s, \"IA_P2_S2.zarr/1\", file_dir/\"IA_P2_S2_1_to_roi.txt\",)\n", + " add_zarr_image(s, file_dir/\"IA_P2_S2.zarr/0\", server.url, file_dir/\"IA_P2_S2_0_to_roi.txt\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S2.zarr/1\", server.url, file_dir/\"IA_P2_S2_1_to_roi.txt\")\n", " \n", " add_roi_annotations(s, Path(file_dir)/ \"IA_P2_S2.ome.zarr/OME/METADATA.ome.xml\")\n", "print(viewer)\n" - ], - "metadata": { - "collapsed": false - }, - "id": "5a96d34fc488b9a6", - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": null, + "id": "31eae17a-1da0-404c-b7ce-d1648a904b14", + "metadata": {}, "outputs": [], "source": [ - "\n", "with viewer.txn() as s:\n", - " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"nm\",\"nm\", \"nm\"], scales=[1,1,1])\n", + " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"µm\",\"µm\", \"µm\"], scales=[1,1,1])\n", " s.dimensions = dimensions\n", " s.layout = neuroglancer.DataPanelLayout(\"xy\")\n", " \n", " s.layers.clear()\n", " \n", - " add_zarr_image(s, \"IA_P2_S3.ome.zarr/0\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S3.ome.zarr/0\", server_url=server.url)\n", " \n", - " add_zarr_image(s, \"IA_P2_S3.zarr/0\", file_dir/\"IA_P2_S3_0_to_roi.txt\")\n", - " add_zarr_image(s, \"IA_P2_S3.zarr/1\", file_dir/\"IA_P2_S3_1_to_roi.txt\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S3.zarr/0\", server.url, file_dir/\"IA_P2_S3_0_to_roi.txt\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S3.zarr/1\", server.url, file_dir/\"IA_P2_S3_1_to_roi.txt\")\n", "\n", " add_roi_annotations(s, Path(file_dir)/ \"IA_P2_S3.ome.zarr/OME/METADATA.ome.xml\")\n", "\n", "print(viewer)" - ], - "metadata": {}, - "id": "31eae17a-1da0-404c-b7ce-d1648a904b14", - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": null, + "id": "82495f65-5634-45cd-91a3-0792f03dd941", + "metadata": {}, "outputs": [], "source": [ "with viewer.txn() as s:\n", - " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"nm\",\"nm\", \"nm\"], scales=[1,1,1])\n", + " dimensions = neuroglancer.CoordinateSpace( names=[\"x\", \"y\", \"z\"], units =[\"µm\",\"µm\", \"µm\"], scales=[1,1,1])\n", " s.dimensions = dimensions\n", " s.layout = neuroglancer.DataPanelLayout(\"xy\")\n", "\n", " s.layers.clear()\n", "\n", - " add_zarr_image(s, \"IA_P2_S4.ome.zarr/0\")\n", + " add_zarr_image(s, file_dir/\"IA_P2_S4.ome.zarr/0\", 
server_url=server.url)\n",
     "    \n",
-    "    add_zarr_image(s, \"IA_P2_S1.zarr/0\", file_dir/\"IA_P2_S1_0_to_roi.txt\")\n",
-    "    add_zarr_image(s, \"IA_P2_S1.zarr/1\", file_dir/\"IA_P2_S1_1_to_roi.txt\")\n",
+    "    add_zarr_image(s, file_dir/\"IA_P2_S1.zarr/0\", server.url, file_dir/\"IA_P2_S1_0_to_roi.txt\")\n",
+    "    add_zarr_image(s, file_dir/\"IA_P2_S1.zarr/1\", server.url, file_dir/\"IA_P2_S1_1_to_roi.txt\")\n",
     "\n",
     "    add_roi_annotations(s, Path(file_dir)/ \"IA_P2_S4.ome.zarr/OME/METADATA.ome.xml\")\n",
     "print(viewer)\n",
     "\n"
-   ],
-   "metadata": {},
-   "id": "82495f65-5634-45cd-91a3-0792f03dd941",
-   "execution_count": null
-  },
-  {
-   "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "zarr_path = Path(file_dir)/\"IA_P2_S1.ome.zarr\"\n",
-    "hwz_images = HedwigZarrImages(zarr_path)\n",
-    "hwz_img = hwz_images[list(hwz_images.get_series_keys())[0]]\n",
-    "\n",
-    "print(hwz_img.neuroglancer_shader_parameters(middle_quantile=[0.1, 0.99]))\n"
-   ],
-   "metadata": {},
-   "id": "0cc2b262-3039-44e9-88d2-c652a8ee765b",
-   "execution_count": null
-  },
-  {
-   "cell_type": "code",
-   "outputs": [],
-   "source": [],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "a5b02df5fc4ecb95",
-   "execution_count": null
+   ]
   }
  ],
  "metadata": {
@@ -424,7 +150,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.13"
+   "version": "3.9.12"
   }
 },
 "nbformat": 4,
diff --git a/pytools/HedwigZarrImage.py b/pytools/HedwigZarrImage.py
index e57a01d..8577fa6 100644
--- a/pytools/HedwigZarrImage.py
+++ b/pytools/HedwigZarrImage.py
@@ -31,7 +31,7 @@ class HedwigZarrImage:
     Represents a OME-NGFF Zarr pyramidal image. The members provide information useful for the Hedwig imaging
     pipelines.
     """
-    def __init__(self, zarr_grp: zarr.Group, _ome_info: OMEInfo, _ome_idx: int):
+    def __init__(self, zarr_grp: zarr.Group, _ome_info: Optional[OMEInfo] = None, _ome_idx: Optional[int] = None):
         self.zarr_group = zarr_grp
         self.ome_info = _ome_info
         self.ome_idx = _ome_idx
diff --git a/pytools/HedwigZarrImages.py b/pytools/HedwigZarrImages.py
index 92f0b9a..fffc8d4 100644
--- a/pytools/HedwigZarrImages.py
+++ b/pytools/HedwigZarrImages.py
@@ -12,9 +12,8 @@
 # limitations under the License.
 
 from pathlib import Path
-
 import zarr
-from typing import Optional, Iterable, Tuple, AnyStr
+from typing import Optional, Iterable, Tuple, AnyStr, Union
 from pytools.utils import OMEInfo
 from pytools.HedwigZarrImage import HedwigZarrImage
 import logging
@@ -75,7 +74,20 @@ def get_series_keys(self) -> Iterable[str]:
 
         return filter(lambda x: x != "OME", self.zarr_root.group_keys())
 
-    def __getitem__(self, item) -> HedwigZarrImage:
+    def group(self, name: str) -> HedwigZarrImage:
+        """
+        Returns a HedwigZarrImage from the given zarr group name or path.
+        """
+
+        if self.ome_xml_path is None:
+            return HedwigZarrImage(self.zarr_root[name])
+
+        ome_index_to_zarr_group = self.zarr_root["OME"].attrs["series"]
+        k_idx = ome_index_to_zarr_group.index(name)
+        return HedwigZarrImage(self.zarr_root[name], self.ome_info, k_idx)
+
+    def __getitem__(self, item: Union[str, int]) -> HedwigZarrImage:
+
         for k_idx, k in enumerate(self.get_series_keys()):
             if item == k and "OME" in self.zarr_root.group_keys():
                 ome_index_to_zarr_group = self.zarr_root["OME"].attrs["series"]
diff --git a/pytools/ng/viz.py b/pytools/ng/viz.py
new file mode 100644
index 0000000..2552013
--- /dev/null
+++ b/pytools/ng/viz.py
@@ -0,0 +1,246 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+import SimpleITK as sitk
+import neuroglancer
+from pathlib import Path
+import jinja2
+import xml.etree.ElementTree as ET
+from pytools import HedwigZarrImages
+from typing import Union
+
+
+_rgb_shader_template = """
+void main() {
+  emitRGB(vec3(toNormalized(getDataValue(0)),
+               toNormalized(getDataValue(1)),
+               toNormalized(getDataValue(2))));
+}
+"""
+
+_gray_shader_template = """
+#uicontrol invlerp normalized(range=[{{range[0]}}, {{range[1]}}],
+                              window=[{{window[0]}}, {{window[1]}}], clamp=true)
+void main() {
+    emitGrayscale(normalized());
+}
+"""
+
+_multichannel_template = """
+#uicontrol float brightness slider(default={{brightness}}, min=-1, max=1, step=0.1)
+#uicontrol float contrast slider(default={{contrast}}, min=-3, max=3, step=0.1)
+
+{% for channel in channelArray %}
+#uicontrol bool {{channel.name}} checkbox(default=true)
+#uicontrol vec3 color{{channel.channel}} color(default="{{channel.color}}")
+#uicontrol invlerp invlerp{{channel.channel}}(range=[{{channel.range[0]}}, {{channel.range[1]}}], \
+window=[{{channel.window[0]}}, {{channel.window[1]}}], \
+channel={{channel.channel}}, \
+clamp=true)
+{% endfor %}
+
+void main() {
+    vec3 cum = vec3(0., 0., 0.);
+    {% for channel in channelArray %}
+    if ({{channel.name}})
+    {
+        cum += color{{channel.channel}} * invlerp{{channel.channel}}(getDataValue({{channel.channel}}));
+    }
+    {% endfor %}
+    emitRGB((cum+brightness)*exp(contrast));
+}
+"""
+
+
+_shader_parameter_cache = {}
+
+
+def _homogeneous_identity(ndim: int) -> np.ndarray:
+    """
+    Create a homogeneous identity matrix with ndim rows and ndim + 1 columns.
+    """
+    return np.identity(ndim + 1)[:ndim, :]
+
+
+def _sitk_offset_from_transform(tx: sitk.Transform):
+    """
+    Get the offset from a SimpleITK transform, since the method is not exposed in the Python API.
+
+    The offset is the translation part of a MatrixOffsetTransformBase-derived transform.
+    """
+    # compute the offset from the transform by transforming a zero point
+    return tx.TransformPoint((0,) * tx.GetDimension())
+
+
+def _sitk_transform_to_ng_transform(tx: sitk.Transform, inverse: bool = True) -> neuroglancer.CoordinateSpaceTransform:
+    """
+    Convert a SimpleITK transform to a neuroglancer.CoordinateSpaceTransform. Assumes the input transform is a 2D
+    affine transform. The output is assumed to be 5D [TCZYX].
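+
+    A minimal sketch of the expected behavior (hypothetical values, not taken
+    from the pipeline data):
+
+        tx = sitk.AffineTransform(2)
+        tx.SetTranslation((3.0, 4.0))
+        ng_tx = _sitk_transform_to_ng_transform(tx, inverse=False)
+        # The translation lands in the YX column of the homogeneous 5x6 matrix:
+        # ng_tx.matrix[3, 5] == 4.0 (y) and ng_tx.matrix[4, 5] == 3.0 (x)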
+    """
+    assert tx.GetDimension() == 2
+
+    if inverse:
+        tx = tx.GetInverse()
+    tx_matrix = np.array(tx.GetMatrix()).reshape(tx.GetDimension(), tx.GetDimension())
+
+    # convert [XY] to [YX]
+    tx_matrix = tx_matrix[::-1, ::-1]
+
+    M = _homogeneous_identity(5)
+    # assign to the YX part of the matrix
+    M[3:5, 3:5] = tx_matrix
+
+    tx_translation = np.array(_sitk_offset_from_transform(tx))
+
+    # convert [XY] to [YX]
+    tx_translation = tx_translation[::-1]
+    M[3:5, 5] = tx_translation
+
+    output_dimensions = neuroglancer.CoordinateSpace(
+        names=["t'", "c^", "z", "y", "x"], units=["", "", "µm", "µm", "µm"], scales=[1, 1, 1, 1, 1]
+    )
+    ng_transform = neuroglancer.CoordinateSpaceTransform(output_dimensions=output_dimensions, matrix=M)
+    return ng_transform
+
+
+def generate_ng_shader(path: Path, key: Union[int, str]) -> str:
+    """
+    Given a path to a zarr structure and a key, generate a neuroglancer shader for the image.
+
+    This will call the HedwigZarrImages class to get the shader parameters for the image, which can be
+    computationally expensive, so the results are cached.
+    """
+    global _shader_parameter_cache
+
+    hwz_images = HedwigZarrImages(path)
+    hwz_image = hwz_images.group(key)
+
+    if (path, key) not in _shader_parameter_cache:
+        _shader_parameter_cache[(path, key)] = hwz_image.neuroglancer_shader_parameters(middle_quantile=[0.01, 0.99])
+
+    params = _shader_parameter_cache[(path, key)]
+
+    if hwz_image.shader_type == "RGB":
+        template = _rgb_shader_template
+    elif hwz_image.shader_type == "Grayscale":
+        template = _gray_shader_template
+    else:
+        template = _multichannel_template
+
+    j2_template = jinja2.Template(template)
+    shader_code = j2_template.render(params)
+
+    return shader_code
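+
+
+# Usage sketch (hypothetical path and key; assumes an OME-NGFF zarr store on disk):
+#
+#   shader = generate_ng_shader(Path("/data/sample.ome.zarr"), "0")
+#   # `shader` is GLSL source suitable for neuroglancer.ImageLayer(shader=...)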
+
+
+def add_zarr_image(viewer_txn: neuroglancer.Viewer, zarr_path: Path, server_url: str, transform_filename=None):
+    """
+    Within a neuroglancer viewer transaction, add a zarr image to the viewer as a new layer.
+
+    :param viewer_txn: The neuroglancer viewer transaction object.
+    :param zarr_path: The path to the zarr file, including the key for the sub image.
+    :param server_url: The url of the server hosting the zarr file. Only the final components of zarr_path (the
+    directory with the "zarr" extension and the subsequent key) are appended to this url.
+    :param transform_filename: The filename of a transform file in SimpleITK format. The transform maps points from
+    the output space to the input space, and is inverted before being passed to neuroglancer. If None, then the
+    identity transform is used.
+
+    """
+
+    zarr_root = zarr_path.parent
+    zarr_key = zarr_path.name
+
+    layername = f"{zarr_root.name}/{zarr_key}"
+
+    if transform_filename:
+        tx = sitk.ReadTransform(transform_filename)
+        ng_transform = _sitk_transform_to_ng_transform(tx, inverse=True)
+    else:
+        # Assuming a standard 5D [TCZYX] zarr, an identity transform with output in micrometer units is the
+        # correct scale.
+        output_dimensions = neuroglancer.CoordinateSpace(
+            names=["t'", "c^", "z", "y", "x"], units=["", "", "µm", "µm", "µm"], scales=[1, 1, 1, 1, 1]
+        )
+        M = _homogeneous_identity(5)
+        ng_transform = neuroglancer.CoordinateSpaceTransform(output_dimensions=output_dimensions, matrix=M)
+
+    viewer_txn.layers[layername] = neuroglancer.ImageLayer(
+        source=neuroglancer.LayerDataSource(f"zarr://{server_url}/{zarr_root.name}/{zarr_key}", transform=ng_transform),
+        shader=generate_ng_shader(zarr_root, zarr_key),
+    )
+
+
+def add_roi_annotations(viewer_txn, ome_xml_filename, *, layername="roi annotation", reference_zarr=None):
+    """
+    Add ROI annotations to the neuroglancer viewer. The annotations are read from the OME-XML file.
+
+    The OME-XML specification for the ROI model is here:
+    https://docs.openmicroscopy.org/ome-model/5.6.3/developers/roi.html
+
+    :param viewer_txn: The neuroglancer viewer transaction object.
+    :param ome_xml_filename: The path to the OME-XML file.
+    :param layername: The name of the annotation layer in the viewer.
+    :param reference_zarr: The path to the reference zarr file. The ROIs are specified in the index space of an
+    image, and the reference image's meta-data is needed to convert them to physical space. If None, a unit scale
+    is assumed. NOTE: This could come from the OME-XML description for an image but currently does not.
+
+    """
+
+    # Default to a unit scale; callers should provide reference_zarr for correct physical units.
+    scales = [1.0, 1.0]
+    if reference_zarr:
+        # Coordinates for the ROI rectangles are in the index space of the reference image. The
+        # dimensions/CoordinateSpace map the index space to physical space, and the "scales" from the reference
+        # image are needed for that mapping.
+        zarr_root = Path(reference_zarr).parent
+        zarr_key = Path(reference_zarr).name
+        hwz_images = HedwigZarrImages(zarr_root)
+        hwz_image = hwz_images.group(zarr_key)
+        spacing_tczyx = hwz_image.spacing
+        # select the X and Y spacing from the reversed TCZYX order
+        scales = spacing_tczyx[:2:-1]
+
+    xml_path = Path(ome_xml_filename)
+
+    ns = {"OME": "http://www.openmicroscopy.org/Schemas/OME/2016-06"}
+    with open(xml_path, "r") as fp:
+        data = fp.read()
+    xml_root = ET.fromstring(data)
+
+    layer = neuroglancer.LocalAnnotationLayer(
+        dimensions=neuroglancer.CoordinateSpace(
+            names=["x", "y"],
+            units="µm",
+            scales=scales,
+        ),
+    )
+
+    viewer_txn.layers[layername] = layer
+
+    for roi in xml_root.iterfind("OME:ROI", ns):
+        text = roi.attrib["ID"]
+        if "Name" in roi.attrib:
+            text = roi.attrib["Name"]
+        for r in roi.iterfind("./OME:Union/OME:Rectangle", ns):
+            height = float(r.attrib["Height"])
+            width = float(r.attrib["Width"])
+            x = float(r.attrib["X"])
+            y = float(r.attrib["Y"])
+
+            a = (x, y)
+            b = (x + width, y + height)
+            for label in roi.iterfind("./OME:Union/OME:Label", ns):
+                text = label.attrib["Text"]
+
+            layer.annotations.append(
+                neuroglancer.AxisAlignedBoundingBoxAnnotation(
+                    description=text, id=neuroglancer.random_token.make_random_token(), point_a=a, point_b=b
+                )
+            )
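+
+
+# Usage sketch (hypothetical paths; assumes an open viewer transaction `s` and a
+# zarr store served over HTTP):
+#
+#   add_roi_annotations(s, Path("/data/sample.ome.zarr/OME/METADATA.ome.xml"),
+#                       reference_zarr=Path("/data/sample.ome.zarr/0"))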