fixed detection csv writing'
buswinka committed Oct 15, 2021
1 parent aa18142 commit dcfd855
Showing 18 changed files with 74 additions and 689 deletions.
60 changes: 27 additions & 33 deletions hcat/detect.py
@@ -1,8 +1,11 @@
import hcat.lib.functional
import hcat.lib.functional as functional
from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter
from hcat.lib.cell import Cell
from hcat.lib.cochlea import Cochlea
from hcat.backends.detection import FasterRCNN_from_url
from hcat.backends.detection import HairCellFasterRCNN
from hcat.lib.utils import warn

import torch
from torch import Tensor
@@ -14,7 +17,7 @@
import skimage.io as io

import os.path
from typing import List, Dict
from typing import Optional, List, Dict


# DOCUMENTED
@@ -32,24 +35,14 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.
"""
print('Initializing hair cell detection algorithm...')
if f is None:
print('\x1b[1;31;40m' + 'ERROR: No File to Analyze... \nAborting.' + '\x1b[0m')
warn('ERROR: No File to Analyze... \nAborting.', color='red')
return None
if not pixel_size:
print('\x1b[1;33;40m'
'WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. Consider suplying value for optimal performance.'
'\x1b[0m')
warn('WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. '
'Consider supplying value for optimal performance.', color='yellow')

with torch.no_grad():

device = 'cuda' if torch.cuda.is_available() else 'cpu'
if device == 'cuda':
print('\x1b[1;32;40mCUDA: GPU successfully initialized!\x1b[0m')
else:
print('\x1b[1;33;40m'
'WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. '
'Analysis may be slow.'
'\x1b[0m')

# Load and preprocess Image
image_base = load(f, 'TileScan 1 Merged', verbose=True) # from hcat.lib.utils
image_base = image_base[[2, 3],...].max(-1) if image_base.ndim == 4 else image_base
@@ -59,24 +52,19 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.

dtype = image_base.dtype if dtype is None else dtype
scale: int = hcat.lib.utils.get_dtype_offset(dtype)

device = 'cuda' if torch.cuda.is_available() else 'cpu'

temp = np.zeros(shape)
temp = np.concatenate((temp, image_base)) / scale * 255
io.imsave(f[:-4:]+'.png', temp.transpose((1,2,0)))

c, x, y = image_base.shape
print(
f'DONE: shape: {image_base.shape}, min: {image_base.min()}, max: {image_base.max()}, dtype: {image_base.dtype}')

if image_base.max() < scale * 0.33:
print('\x1b[1;33;40m'
f'WARNING: Image max value less than 1/3 the scale factor for bit depth. Image Max: {image_base.max()},'
f' Scale Factor: {scale}, dtype: {dtype}. Readjusting scale to 1.5 time Image max.'
'\x1b[0m')
scale = image_base.max() * 1.5


warn(f'WARNING: Image max value less than 1/3 the scale factor for bit depth. Image Max: {image_base.max()},'
f' Scale Factor: {scale}, dtype: {dtype}. Readjusting scale to 1.5 times Image max.', color='yellow')
scale = image_base.max() * 1.5

image_base = torch.from_numpy(image_base.astype(np.uint16) / scale).to(device)

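As a worked example of the rescaling above (numbers are hypothetical, not from the repository): a uint16 image has a dtype offset of 65535, so an image whose maximum is 12,000 falls below the 0.33 × 65535 ≈ 21,627 cutoff; scale is then reset to 1.5 × 12,000 = 18,000 and the brightest pixel normalizes to roughly 0.67 instead of roughly 0.18.

    scale = 65535                  # assumed return of hcat.lib.utils.get_dtype_offset for uint16
    image_max = 12_000             # hypothetical image maximum
    if image_max < scale * 0.33:   # 12,000 < 21,627, so the warning above fires
        scale = image_max * 1.5    # scale becomes 18,000
    print(image_max / scale)       # ~0.667, versus 12,000 / 65535 ~ 0.183 without the readjustment
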
@@ -91,7 +79,11 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.
# normalize around zero
image_base.sub_(0.5).div_(0.5)


if device == 'cuda':
warn('CUDA: GPU successfully initialized!', color='green')
else:
warn('WARNING: GPU not present or CUDA is not correctly initialized for GPU accelerated computation. '
'Analysis may be slow.', color='yellow')

# Initialize the model...
model = FasterRCNN_from_url(url='https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true', device=device)
@@ -161,10 +153,8 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.
curvature, distance, apex = predict_curvature(max_projection, cells, curve_path)

if curvature is None:
print('\x1b[1;33;40mWARNING: ' +
'All three methods to predict hair cell path have failed. Frequency Mapping functionality is limited.'
'Consider Manual Calculation.'
+ '\x1b[0m')
warn('WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is '
'limited. Consider Manual Calculation.', color='yellow')

# curvature estimation really only works if there is a lot of tissue...
if distance is not None and distance.max() > 4000:
@@ -173,13 +163,16 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.

else:
curvature, distance, apex = None, None, None
print('\x1b[1;33;40mWARNING: ' +
'Predicted Cochlear Distance is below 4000um. Not sufficient information to determine cell frequency.'
+ '\x1b[0m')
warn('WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient '
'information to determine cell frequency.', color='yellow')

xml = get_xml(f) if f.endswith('.lif') else None
filename = os.path.split(f)[-1]

# remove weird cell ID's
for i, c in enumerate(cells): c.id = i+1


# Store in compressible object for further use
c = Cochlea(mask=None,
filename=filename,
@@ -192,10 +185,11 @@ def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.
cells=cells,
apex=apex)

if save_xml: cochlea_to_xml(c)
c.write_csv()

if save_xml: cochlea_to_xml(c)
if save_fig: c.make_detect_fig(image_base)
# c.make_cochleogram()

print('')
return c

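Pulling the detect.py hunks together, the tail of _detect now reads roughly as follows (a condensed paraphrase; Cochlea keyword arguments not shown in the hunks above are omitted here):

    for i, c in enumerate(cells): c.id = i + 1        # renumber cell IDs so CSV rows start at 1
    c = Cochlea(mask=None, filename=filename, cells=cells, apex=apex)  # other fields as shown above
    c.write_csv()                                      # the detection CSV is now always written
    if save_xml: cochlea_to_xml(c)                     # XML export no longer gates the CSV
    if save_fig: c.make_detect_fig(image_base)
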
Binary file removed hcat/lib/__pycache__/cochlea.cpython-38.pyc
67 changes: 41 additions & 26 deletions hcat/lib/cochlea.py
@@ -19,8 +19,6 @@
from skimage.io import imsave
import skimage.segmentation

import torchvision.transforms.functional

class Cochlea:
def __init__(self,
mask: Tensor = None,
@@ -349,7 +347,7 @@ def _render(mask: Tensor, numcells: int) -> Tensor:
return mask

@graceful_exit('\x1b[1;31;40m' + 'ERROR: csv generation failed.' + '\x1b[0m')
def write_csv(self, filename: Optional[Union[bool, str]] = False) -> None:
def write_csv(self, filename: Optional[Union[bool, str]] = None) -> None:
"""
Write results of cochlea object to a csv file for further statistical analysis.
@@ -359,35 +357,52 @@ def write_csv(self, filename: Optional[Union[bool, str]] = None) -> None:
:param filename: filename to save csv as. If unset, uses image filename.
:return: None
"""
label = 'cellID,frequency,percent_loc,x_loc,y_loc,z_loc,volume,summed,'
for c in ['myo', 'dapi', 'actin', 'gfp']:
label += f'{c}_mean,{c}_median,{c}_std,{c}_var,{c}_min,{c}_max,{c}_%zero,{c}_%saturated,'
if self.analysis_type == 'segment':
label = 'cellID,frequency,percent_loc,x_loc,y_loc,z_loc,volume,summed,'
for c in ['myo', 'dapi', 'actin', 'gfp']:
label += f'{c}_mean,{c}_median,{c}_std,{c}_var,{c}_min,{c}_max,{c}_%zero,{c}_%saturated,'

# print(filename)
# print(filename)

if filename is None and self.filename is not None:
filename = os.path.splitext(self.filename)[0] + '.csv' # Remove .lif and add .csv
elif filename is None and self.filename is None:
filename = 'analysis.csv'
if filename is None and self.filename is not None:
filename = os.path.splitext(self.filename)[0] + '.csv' # Remove .lif and add .csv
elif filename is None and self.filename is None:
filename = 'analysis.csv'

f = open(filename, 'w')
f.write(f'Filename: {self.filename}\n')
f.write(f'Analysis Date: {self.analysis_date}\n')
f.write(f'Treatment: {self.analysis_date}\n')
f.write(label[:-1:] + '\n') # index to remove final comma
f = open(filename, 'w')
f.write(f'Filename: {self.filename}\n')
f.write(f'Analysis Date: {self.analysis_date}\n')
f.write(label[:-1:] + '\n') # index to remove final comma

for cell in self.cells:
f.write(f'{cell.id},{cell.frequency},{cell.percent_loc},')
f.write(f'{cell.loc[1]},{cell.loc[2]},{cell.loc[3]},{cell.volume},{cell.summed},')
for cell in self.cells:
f.write(f'{cell.id},{cell.frequency},{cell.percent_loc},')
f.write(f'{cell.loc[1]},{cell.loc[2]},{cell.loc[3]},{cell.volume},{cell.summed},')

for id in cell.channel_names:
f.write(f'{cell.channel_stats[id]["mean"]},{cell.channel_stats[id]["median"]},{cell.channel_stats[id]["std"]},{cell.channel_stats[id]["var"]},')
f.write(f'{cell.channel_stats[id]["min"]},{cell.channel_stats[id]["max"]},{cell.channel_stats[id]["%zero"]},{cell.channel_stats[id]["%saturated"]},')
f.write('\n')
f.close()
elif self.analysis_type == 'detect':
label = 'cellID,type,score,frequency,percent_loc,x_loc,y_loc,'  # trailing comma so the strip below is correct

if filename is None and self.path is not None:
filename = os.path.splitext(self.path)[0] + '.csv' # Remove .lif and add .csv
elif filename is None and self.path is None:
filename = 'analysis.csv'
f = open(filename, 'w')
f.write(f'Filename: {self.filename}\n')
f.write(f'Analysis Date: {self.analysis_date}\n')
f.write(label[:-1:] + '\n') # index to remove final comma

for id in cell.channel_names:
f.write(f'{cell.channel_stats[id]["mean"]},{cell.channel_stats[id]["median"]},{cell.channel_stats[id]["std"]},{cell.channel_stats[id]["var"]},')
f.write(f'{cell.channel_stats[id]["min"]},{cell.channel_stats[id]["max"]},{cell.channel_stats[id]["%zero"]},{cell.channel_stats[id]["%saturated"]},')
f.write('\n')
f.close()
for cell in self.cells:
f.write(f'{cell.id},{cell.type},{cell.scores},{cell.frequency},{cell.percent_loc},')
f.write(f'{cell.loc[1]},{cell.loc[2]}')
f.write('\n')
f.close()

# @graceful_exit('\x1b[1;31;40m' + 'ERROR: Figure Render failed.' + '\x1b[0m')
def make_fig(self, filename: Optional[str] = None) -> None:
@graceful_exit('\x1b[1;31;40m' + 'ERROR: Figure Render failed.' + '\x1b[0m')
def make_segment_fig(self, filename: Optional[str] = None) -> None:
"""
Make summary figure for quick interpretation of results.
:param filename: filename to save figure as. If unset, uses image filename.
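For reference, the new 'detect' branch of write_csv above should emit a file shaped roughly like the following; the layout follows the code, but every value shown (filename, date, cell types, scores, positions) is hypothetical:

    Filename: example_cochlea.lif
    Analysis Date: Oct 15 2021
    cellID,type,score,frequency,percent_loc,x_loc,y_loc
    1,OHC,0.98,4523.1,0.12,1024.0,512.0
    2,IHC,0.95,4601.7,0.13,1051.0,498.0
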
6 changes: 6 additions & 0 deletions hcat/lib/utils.py
@@ -213,7 +213,13 @@ def pad_image_with_reflections(image: torch.Tensor, pad_size: Tuple[int] = (30,
########################################################################################################################
# Generics
########################################################################################################################
def warn(message: str, color: str) -> None:
c = {'green' : '\x1b[1;32;40m',
'yellow': '\x1b[1;33;40m',
'red' : '\x1b[1;31;40m',
'norm' : '\x1b[0m'} # green, yellow, red, normal

print(c[color] + message + c['norm'])

def load(file: str, header_name: Optional[str] = 'TileScan 1 Merged',
verbose: bool = False) -> Union[None, np.array]:
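A minimal usage sketch of the new warn helper, mirroring the calls added in detect.py above:

    from hcat.lib.utils import warn

    warn('CUDA: GPU successfully initialized!', color='green')
    warn('WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y.', color='yellow')
    warn('ERROR: No File to Analyze... \nAborting.', color='red')

Note that an unrecognized color raises a KeyError, since the helper indexes the color dictionary directly.
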
24 changes: 0 additions & 24 deletions hcat/models/depreciated/HCBlock.py

This file was deleted.

122 changes: 0 additions & 122 deletions hcat/models/depreciated/HCNet.py

This file was deleted.

