This repository has been archived by the owner on Feb 22, 2020. It is now read-only.

Fix memory and styling errors #307

Merged 4 commits on Feb 1, 2018

Changes from all commits

18 changes: 9 additions & 9 deletions prediction/src/algorithms/evaluation/evaluate_detection.py
@@ -230,7 +230,7 @@ def filter_detections(prediction, detection):
 # id,confidence,z,y,x,size
 # note: z index starts from 1 and unit of y, x, and size is px
 # output details in
-# ['Dataset', 'Patient', 'StudyInstanceUID', 'SeriesInstanceUID', 'Cancer (%)', 
+# ['Dataset', 'Patient', 'StudyInstanceUID', 'SeriesInstanceUID', 'Cancer (%)',
 # 'Confidence', 'ImageNo','CenterX (px)','CenterY (px)','Diameter (mm)']


@@ -243,7 +243,7 @@ def get_detection_detail(prediction, detection, filepaths):
         spacing = np.load(os.path.join(prep_result_path, id + '_info.npy'))[1]
         rows, cols = np.where(filepaths == id)
         if rows.size == 0:
-            print id, 'not found'
+            print(id, 'not found')
             continue
         probability = round(get_prediction_probability(prediction, id) * 100, 2)
         filepath = filepaths[rows][0]
@@ -295,7 +295,7 @@ def compare_results(detections, annotations):
 if os.path.exists(filepath):
     filepaths = np.load(filepath)
     detection_detail = get_detection_detail(prediction, detection, filepaths)
-    print detection_detail
+    print(detection_detail)
     df = pandas.DataFrame(detection_detail)
     df.columns = [
         'Dataset',
@@ -311,13 +311,13 @@ def compare_results(detections, annotations):
     df.to_csv(detection_detail_file, index=False)

 detections = filter_detections(prediction, detection)
-print 'number of detections:', len(detections)
+print('number of detections:', len(detections))

 annotations = load_annotations_OsiriX(annotation_path)
-print 'number of annotations:', len(annotations)
+print('number of annotations:', len(annotations))

 correct_detection = compare_results(detections, annotations)
-print 'number of correct_detection:', correct_detection
+print('number of correct_detection:', correct_detection)

 # calculate precision rate
 if len(detections) == 0:
@@ -337,6 +337,6 @@ def compare_results(detections, annotations):
 else:
     fscore = 2 * precision * recall / (precision + recall)

-print 'precision rate:', correct_detection, '/', len(detections), '(', round(precision, 2), '% )'
-print 'recall rate:', correct_detection, '/', len(annotations), '(', round(recall, 2), '% )'
-print 'f-score:', round(fscore, 2), '%'
+print('precision rate:', correct_detection, '/', len(detections), '(', round(precision, 2), '% )')
+print('recall rate:', correct_detection, '/', len(annotations), '(', round(recall, 2), '% )')
+print('f-score:', round(fscore, 2), '%')
21 changes: 16 additions & 5 deletions prediction/src/algorithms/segment/src/models/simple_3d_model.py
@@ -37,7 +37,6 @@ def simple_model_3d(input_shape, downsize_filters_factor=32, pool_size=(2, 2, 2)
             return model

         self.input_shape = (128, 128, 256, 1)
-        self.scale_factor = 1 / 4.0
         self.model = simple_model_3d(input_shape=self.input_shape)
         self.best_model_path = super(Simple3DModel, self).get_best_model_path()

@@ -46,8 +45,14 @@ def _fit(self, X, y):
         X_rescaled = np.zeros((X.shape[0], *self.input_shape))
         y_rescaled = np.zeros((X.shape[0], *self.input_shape))
         for i in range(X.shape[0]):
-            X_rescaled[i, :, :, :, 0] = zoom(X[i, :, :, :, 0], self.scale_factor)
-            y_rescaled[i, :, :, :, 0] = zoom(y[i, :, :, :, 0], self.scale_factor)
+            X_rescaled[i, ..., 0] = zoom(
+                X[i, ..., 0],
+                np.array(self.input_shape[:-1]) / np.array(X.shape[1:-1])
+            )
+            y_rescaled[i, ..., 0] = zoom(
+                y[i, ..., 0],
+                np.array(self.input_shape[:-1]) / np.array(y.shape[1:-1])
+            )
         model_checkpoint = ModelCheckpoint(self.best_model_path, monitor='loss', verbose=1, save_best_only=True)
         self.model.fit(X_rescaled, y_rescaled, callbacks=[model_checkpoint], epochs=10)

@@ -56,8 +61,14 @@ def _predict(self, X):

         # Scale the bigger 3D input images to the desired smaller shape
         X_rescaled = np.zeros((1, *self.input_shape))
-        X_rescaled[0, :, :, :, 0] = zoom(X[0, :, :, :, 0], self.scale_factor)
+        X_rescaled[0, ..., 0] = zoom(
+            X[0, ..., 0],
+            np.array(self.input_shape[:-1]) / np.array(X.shape[1:-1])
+        )

         X_predicted = self.model.predict(X_rescaled)
-        y_predicted[0, :, :, :, 0] = zoom(X_predicted[0, :, :, :, 0], 1 / self.scale_factor)
+        y_predicted[0, ..., 0] = zoom(
+            X_predicted[0, ..., 0],
+            np.array(X.shape[1:-1]) / np.array(self.input_shape[:-1])
+        )
         return y_predicted
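
The change above is the core of the memory and shape fix: instead of a single fixed `scale_factor = 1/4`, each spatial axis now gets its own zoom factor, so any input volume lands on the model's `input_shape`. A minimal sketch of that idea with `scipy.ndimage.zoom`; the shapes and the `rescale_to` helper are illustrative, not taken from the PR's data.

```python
import numpy as np
from scipy.ndimage import zoom


def rescale_to(volume, target_shape):
    # One zoom factor per axis, so arbitrary input shapes map onto target_shape.
    factors = np.array(target_shape) / np.array(volume.shape)
    return zoom(volume, factors)


# A (179, 512, 512) scan squeezed onto the model's (128, 128, 256) grid,
# and then blown back up to the original resolution, as _predict does.
scan = np.random.rand(179, 512, 512)
small = rescale_to(scan, (128, 128, 256))
restored = rescale_to(small, scan.shape)
print(small.shape, restored.shape)  # (128, 128, 256) (179, 512, 512)
```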
20 changes: 16 additions & 4 deletions prediction/src/algorithms/segment/trained_model.py
@@ -12,6 +12,7 @@

 from ...algorithms.segment.src.models.simple_3d_model import Simple3DModel
 from ...preprocess.load_ct import load_ct, MetaData
+from ...preprocess.preprocess_ct import mm_coordinates_to_voxel
 from ...preprocess.lung_segmentation import DATA_SHAPE


@@ -48,7 +49,7 @@ def predict(dicom_path, centroids):
     input_data[0, :voxel_data.shape[0], :voxel_data.shape[1], :voxel_data.shape[2], 0] = voxel_data
     model = Simple3DModel().load_best()
     segment_path = model.predict(input_data)
-    volumes = calculate_volume(segment_path, centroids)
+    volumes = calculate_volume(segment_path, centroids, dicom_path)
     return {'binary_mask_path': segment_path, 'volumes': volumes}
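
For orientation (unchanged by this PR apart from the extra `dicom_path` argument): as far as the visible hunk shows, `predict` does not resize the scan before feeding the model; it copies the voxel data into the corner of a fixed-size zero buffer of shape `DATA_SHAPE`, so smaller scans are effectively zero-padded. A minimal sketch with an illustrative scan shape:

```python
import numpy as np

DATA_SHAPE = (512, 512, 512, 1)

# Hypothetical scan, smaller than the buffer on every axis.
voxel_data = np.random.rand(321, 398, 412)

# NB: even empty, this float64 buffer is ~1 GiB (it was ~2 GiB before the PR shrank DATA_SHAPE).
input_data = np.zeros((1, *DATA_SHAPE))
input_data[0, :voxel_data.shape[0], :voxel_data.shape[1], :voxel_data.shape[2], 0] = voxel_data
print(input_data.shape)  # (1, 512, 512, 512, 1)
```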


@@ -72,16 +73,27 @@ def calculate_volume(segment_path, centroids, ct_path=None):
         list[float]: a list of volumes in cubic mm (if a ct_path has been provided)
             of a connected component for each centroid.
     """
+    if not centroids:
+        return []
+
     mask = np.load(segment_path)
     mask, _ = scipy.ndimage.label(mask)
-    labels = [mask[centroid['x'], centroid['y'], centroid['z']] for centroid in centroids]
-    volumes = np.bincount(mask.flatten())
-    volumes = volumes[labels].tolist()

     if ct_path:
         meta = load_ct(ct_path, voxel=False)
         meta = MetaData(meta)

+    coords = [[centroid['z'], centroid['y'], centroid['x']] for centroid in centroids]
+
+    if ct_path:
+        coords = [mm_coordinates_to_voxel(coord, meta) for coord in coords]
+
+    labels = [mask[coord[0], coord[1], coord[2]] for coord in coords]
+
+    volumes = np.bincount(mask.flatten())
+    volumes = volumes[labels].tolist()
+
     if ct_path:
         spacing = np.prod(meta.spacing)
         volumes = [volume * spacing for volume in volumes]

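A compact sketch of the voxel-counting step in `calculate_volume` above: label the connected components of the binary mask, look up the component under each voxel-space centroid, and count its voxels with `bincount`. The mm-to-voxel conversion and the mm³ scaling via the CT spacing are omitted here, and the helper name is illustrative.

```python
import numpy as np
import scipy.ndimage


def component_volumes(mask, voxel_centroids):
    # Label connected components, then count the voxels of the component under each centroid.
    labelled, _ = scipy.ndimage.label(mask)
    labels = [labelled[z, y, x] for z, y, x in voxel_centroids]
    counts = np.bincount(labelled.ravel())
    return [int(counts[label]) for label in labels]


# Two 2x2x2 blobs in a small mask; each centroid picks out its own component.
mask = np.zeros((10, 10, 10), dtype=bool)
mask[0:2, 0:2, 0:2] = True
mask[5:7, 5:7, 5:7] = True
print(component_volumes(mask, [(0, 0, 0), (5, 5, 5)]))  # [8, 8]
```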
5 changes: 4 additions & 1 deletion prediction/src/preprocess/lung_segmentation.py
@@ -18,7 +18,10 @@
 except ValueError:
     from config import Config

-DATA_SHAPE = (512, 512, 1024, 1)
+# P{|282 - h| <= 66} ~= 0.997, according to Table 2 of the research paper DOI 10.1097/HP.0b013e31823a13f1.
+# Taking into account a z-axis spacing >= 0.9 mm,
+# (282 + 66) / 0.9 ≈ 387 voxels, so 512 voxels on the z-axis should be enough for all cases.
+DATA_SHAPE = (512, 512, 512, 1)


 def get_z_range(dicom_path):
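A rough back-of-the-envelope check of the new shape (assuming 4-byte floats; the real model's dtype and batch size may differ): halving the z-extent halves the per-volume memory, and the 512-voxel z-axis still covers the lung heights cited in the comment.

```python
import numpy as np

# Memory per volume for the old and new DATA_SHAPE, assuming 4-byte floats.
old_gib = np.prod((512, 512, 1024, 1)) * 4 / 2**30
new_gib = np.prod((512, 512, 512, 1)) * 4 / 2**30
print(f"old: {old_gib:.1f} GiB, new: {new_gib:.1f} GiB")  # old: 1.0 GiB, new: 0.5 GiB

# Worst-case lung height from the comment: 282 + 66 mm at >= 0.9 mm z-spacing.
print((282 + 66) / 0.9)  # ~386.7 voxels, comfortably below 512
```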
20 changes: 14 additions & 6 deletions prediction/src/tests/test_volume_calculation.py
@@ -8,16 +8,24 @@
 def centroids(scope='session'):
     yield [
         {'x': 0, 'y': 0, 'z': 0},
-        {'x': 32, 'y': 32, 'z': 28},
-        {'x': 45, 'y': 45, 'z': 12}]
+        {'x': 28, 'y': 32, 'z': 32},
+        {'x': 12, 'y': 45, 'z': 45}]


 @pytest.fixture
 def centroids_alt(scope='session'):
     yield [
         {'x': 0, 'y': 0, 'z': 0},
         {'x': 0, 'y': 0, 'z': 0},
-        {'x': 45, 'y': 45, 'z': 12}]
+        {'x': 12, 'y': 45, 'z': 45}]


+@pytest.fixture
+def centroids_clt(scope='session'):
+    yield [
+        {'x': 0, 'y': 0, 'z': 0},
+        {'x': 0, 'y': 0, 'z': 0},
+        {'x': 8.3, 'y': 31.5, 'z': 113}]


 @pytest.fixture
@@ -40,7 +48,7 @@ def generate_mask(centroids, volumes, shape=(50, 50, 29)):
     mask = np.zeros(shape, dtype=np.bool_)

     for centroid, volume in zip(centroids, volumes):
-        centroid_ = np.asarray([centroid['x'], centroid['y'], centroid['z']])
+        centroid_ = np.asarray([centroid['z'], centroid['y'], centroid['x']])
         free_voxels = np.where(mask != -1)
         free_voxels = np.asarray(free_voxels).T
         free_voxels = sorted(free_voxels, key=lambda x: np.linalg.norm(x - centroid_, ord=2))
@@ -78,14 +86,14 @@ def test_overlapped_volume_calculation(tmpdir, centroids_alt, volumes_alt):
     assert calculated_volumes == volumes_alt


-def test_overlapped_dicom_volume_calculation(tmpdir, dicom_path, centroids_alt, volumes_alt):
+def test_overlapped_dicom_volume_calculation(tmpdir, dicom_path, centroids_alt, centroids_clt, volumes_alt):
     mask = generate_mask(centroids_alt, volumes_alt)

     # The balls' area must be 100 + 30, since the first ball overlaps the second one
     assert mask.sum() == 130

     path = get_mask_path(tmpdir, mask)
-    calculated_volumes = calculate_volume(str(path), centroids_alt, dicom_path)
+    calculated_volumes = calculate_volume(str(path), centroids_clt, dicom_path)

     # Even though they overlap, the number of volumes must be preserved
     assert len(calculated_volumes) == len(volumes_alt)
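
The new `centroids_clt` fixture gives the last centroid in millimetres rather than voxel indices, which is why `test_overlapped_dicom_volume_calculation` now passes it alongside the DICOM path. A minimal sketch of the kind of conversion this relies on; it ignores any origin offset the real `mm_coordinates_to_voxel` helper may apply, and the spacing below is purely hypothetical.

```python
def mm_to_voxel(coord_mm, spacing_mm):
    # Divide each millimetre coordinate by the voxel spacing on that axis.
    return [int(round(c / s)) for c, s in zip(coord_mm, spacing_mm)]


# With a hypothetical (z, y, x) spacing of (2.5, 0.7, 0.7) mm, the fixture's
# (z=113, y=31.5, x=8.3) mm centroid lands near voxel (45, 45, 12).
print(mm_to_voxel([113, 31.5, 8.3], [2.5, 0.7, 0.7]))  # [45, 45, 12]
```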