import os
import torch
import pandas as pd
import shutil
import numpy as np
import h5py
import openslide
from PIL import Image
import json
import open_clip_custom as conch_clip
import random
### WSI preprocessing ###
def save_hdf5(output_path, asset_dict, attr_dict= None, mode='a'):
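    """Write (or append to) an HDF5 file.

    Each entry of asset_dict becomes a resizable dataset keyed by name; on
    later calls with mode='a', new rows are appended along axis 0. Optional
    per-dataset attributes are taken from attr_dict. Returns output_path.
    """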
file = h5py.File(output_path, mode)
for key, val in asset_dict.items():
data_shape = val.shape
if key not in file:
data_type = val.dtype
chunk_shape = (1, ) + data_shape[1:]
maxshape = (None, ) + data_shape[1:]
dset = file.create_dataset(key, shape=data_shape, maxshape=maxshape, chunks=chunk_shape, dtype=data_type)
dset[:] = val
if attr_dict is not None:
if key in attr_dict.keys():
for attr_key, attr_val in attr_dict[key].items():
dset.attrs[attr_key] = attr_val
else:
dset = file[key]
dset.resize(len(dset) + data_shape[0], axis=0)
dset[-data_shape[0]:] = val
file.close()
return output_path
def divide_multiclass_data(h5_path_lst, label_csv, shots=None):  # same logic as in WsiDataset.py
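    """Group h5 paths by their 'Diagnosis' label from label_csv.

    Returns (paths, classes), where paths[i] holds the h5 paths whose
    diagnosis is classes[i]. If shots is given, each class is capped at
    that many paths. Slides missing from the CSV are skipped with a warning.
    """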
label_df = pd.read_csv(label_csv)
paths = []
classes = []
for path in h5_path_lst:
slide_id = os.path.basename(path)[:-3] # Extract the slide ID by removing the .h5 extension
row = label_df[label_df['slide_id'] == slide_id]
if not row.empty:
diagnosis = row.iloc[0]['Diagnosis']
if diagnosis not in classes:
classes.append(diagnosis)
paths.append([path])
else:
idx = classes.index(diagnosis)
if shots:
if len(paths[idx]) < shots:
paths[idx].append(path)
else:
paths[idx].append(path)
else:
print(f"Warning: Label of {slide_id} not found in the CSV file.")
continue # Skip this h5 file if no corresponding row is found in the CSV file
return paths, classes
def get_h5_paths(dir_path):
h5_files = []
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith('.h5'):
h5_files.append(os.path.join(root, file))
return h5_files
def read_h5(path):
with h5py.File(path, 'r') as f:
coords = f['coords'][:]
features = f['features'][:]
return coords, features
def extract_patch_dataset_embeddings(data_path, model_path, save_path, device):
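    """Encode every .png patch under data_path with the CONCH image encoder
    and save each embedding as a .pt file under save_path, skipping patches
    whose embedding file already exists."""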
conch_model, preprocess = conch_clip.create_model_from_pretrained("conch_ViT-B-16", checkpoint_path=model_path)
conch_model.to(device)
    # Make sure save_path exists
os.makedirs(save_path, exist_ok=True)
    # Iterate over every file in data_path
for img_name in os.listdir(data_path):
if img_name.endswith('.png'):
img_path = os.path.join(data_path, img_name)
save_file_path = os.path.join(save_path, f"{os.path.splitext(img_name)[0]}.pt")
            # Skip if the embedding already exists at save_file_path
if os.path.exists(save_file_path):
print(f"Skipping image {img_name} as embedding already exists at {save_file_path}")
continue
            # Open and preprocess the image
image = Image.open(img_path)
image = preprocess(image).unsqueeze(0).to(device)
#print(image.shape)
            # Extract the image embedding
with torch.inference_mode():
image_embs = conch_model.encode_image(image, proj_contrast=False, normalize=False)
#print(image_embs.shape)
            # Save the embedding
print(f'Saving image {img_name} embedding to {save_file_path}')
torch.save(image_embs, save_file_path)
def ens_feat_label_pt(pt_dir_path, csv_path, save_path, save_data_name, save_label_name):
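    """Assemble the per-patch .pt embeddings in pt_dir_path and their labels
    from csv_path into two stacked tensors, saved under save_path as
    <save_data_name>.pt and <save_label_name>.pt. The CSV is expected to map
    'image_name' (a .png filename) to a 'classification' label."""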
datas = []
labels = []
df = pd.read_csv(csv_path)
for pt_name in os.listdir(pt_dir_path):
if pt_name.endswith('.pt'):
pt_path = os.path.join(pt_dir_path, pt_name)
print(f'loading {pt_name}')
png_name = pt_name.replace('.pt', '.png')
row = df[df['image_name'] == png_name]
label = row.iloc[0]['classification']
pt_data = torch.load(pt_path)
datas.append(pt_data)
labels.append(label)
datas = torch.stack(datas)
print(datas.shape)
labels = torch.tensor(labels)
print(labels.shape)
    torch.save(datas, os.path.join(save_path, save_data_name + '.pt'))
    torch.save(labels, os.path.join(save_path, save_label_name + '.pt'))
def generate_patch_mask(h5_path_lst, mask_dir, save_dir, patch_size=224):
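    """Label each patch of every slide as tumor (1) or background/normal (0).

    For each h5 file, the patch coordinates are read and the matching region
    of <slide_id>_mask.tif is inspected; a patch is labeled 1 when more than
    half of its pixels are non-zero in the mask. The labels are saved as
    <slide_id>_patch_mask.npy in save_dir."""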
for h5_path in h5_path_lst:
slide_id = os.path.basename(h5_path)[:-3]
print(f"loading {slide_id} in {h5_path}")
coords, data = read_h5(h5_path)
        mask_file = os.path.join(mask_dir, slide_id + "_mask.tif")
mask_wsi = openslide.open_slide(mask_file)
patch_labels = []
for coord in coords:
patch_mask = np.array(mask_wsi.read_region([int(coord[0]), int(coord[1])], 0, (patch_size, patch_size)).convert('L'))
mask_nonzero = np.count_nonzero(patch_mask)
if mask_nonzero > patch_size**2 / 2:
patch_labels.append(1)
else:
patch_labels.append(0)
save_path = os.path.join(save_dir, slide_id + "_patch_mask.npy")
np.save(save_path, np.array(patch_labels))
def get_cancer_ratio(h5_path_lst, patch_mask_dir):
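    """Return (mean ratio, per-slide ratios) of tumor-labeled patches, using
    the <slide_id>_patch_mask.npy files written by generate_patch_mask."""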
cancer_ratio_lst = []
for h5_path in h5_path_lst:
slide_id = os.path.basename(h5_path)[:-3]
patch_label_path = os.path.join(patch_mask_dir, slide_id + "_patch_mask.npy")
patch_label = np.load(patch_label_path)
cancer_num = np.sum(patch_label == 1)
cancer_ratio = cancer_num / len(patch_label)
cancer_ratio_lst.append(cancer_ratio)
return np.mean(cancer_ratio_lst), cancer_ratio_lst
def divide_cm_dataset():
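    """Split the CM h5 features into train/test directories: slides listed in
    the detection CSV are moved to the test set, everything else to train.
    Paths are hard-coded for the CPTAC CM setup."""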
csv_path = '/ailab/group/pjlab-medai/zhouxiao/pathology/data/cptac_WSI_data/cptac_cm_detection.csv'
source_dir = '/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/CM/h5_files'
train_dir = '/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/CM/CM_train'
test_dir = '/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/CM/CM_test'
# Load the CSV file into a DataFrame
df = pd.read_csv(csv_path)
# Get the list of .h5 files in the source directory
h5_files = get_h5_paths(source_dir)
# Iterate through each .h5 file and assign it to train or test based on slide_id in CSV
for h5_file in h5_files:
        # Extract the base name of the .h5 file (strip the directory and the .h5 extension)
base_name = os.path.split(h5_file)[-1][:-3]
# Check if the slide_id is in the CSV and assign to test or train
if base_name in df['slide_id'].values:
target_dir = test_dir
else:
target_dir = train_dir
# Create the destination directory if it doesn't exist
os.makedirs(target_dir, exist_ok=True)
# Move the .h5 file to the appropriate directory
shutil.move(h5_file, os.path.join(target_dir, os.path.basename(h5_file)))
def divide_gbmlgg_dataset(gbm_dir_path, lgg_dir_path, label_csv):
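    """Build the GBM/LGG test split: shuffle all h5 paths, group them by
    diagnosis via divide_multiclass_data, and take the first half of each of
    the three classes as the test set. Returns the test-split slide IDs."""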
gbm_h5_paths = get_h5_paths(gbm_dir_path)
lgg_h5_paths = get_h5_paths(lgg_dir_path)
h5_path_lst = gbm_h5_paths + lgg_h5_paths
random.shuffle(h5_path_lst)
paths, classnames = divide_multiclass_data(h5_path_lst, label_csv)
classA_paths, classB_paths, classC_paths = paths
random.shuffle(classA_paths)
random.shuffle(classB_paths)
random.shuffle(classC_paths)
print("classA_paths:", len(classA_paths), "classB_paths:", len(classB_paths), "classC_paths:", len(classC_paths))
half_classA_len, half_classB_len, half_classC_len = len(classA_paths) // 2, len(classB_paths) // 2, len(classC_paths) // 2
    # Note: the order of these three classes must not change (train and test must stay consistent), otherwise the classnames will not match when the dataset is loaded
test_h5_path_lst = classA_paths[:half_classA_len] + classB_paths[:half_classB_len] + classC_paths[:half_classC_len]
test_idxs = [os.path.basename(path)[:-3] for path in test_h5_path_lst]
return test_idxs
def load_prompts_from_template(json_path, classnames_example=None):  # classnames_example includes 'normal'; it is the list of diagnosis results taken from the test-set CSV
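    """Build zero-shot prompts from a JSON file of templates and classnames.

    Each JSON entry supplies a 'templates' string containing a CLASSNAME
    placeholder and a 'classnames' mapping. If classnames_example is given,
    classes are matched on their first three letters (lowercase) and ordered
    to follow that example. Returns (prompt_list, classname_list)."""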
with open(json_path, 'r') as file:
data = json.load(file)
prompt_list = []
classname_list = []
if classnames_example:
classnames_key = [classname[:3].lower() for classname in classnames_example]
for key, value in data.items():
classnames = value['classnames']
template = value['templates']
prompts = []
classes = []
        if classnames_example:
            # Match on the first three lowercase letters so the class order follows classnames_example
            key_dict = {k[:3].lower(): v for k, v in classnames.items()}
            for short_key in classnames_key:
                class_value = key_dict[short_key]
                new_string = template.replace('CLASSNAME', class_value)
                prompts.append(new_string)
                classes.append(class_value)
else:
for class_name_type, class_value in classnames.items(): #normal 0, tumor 1
new_string = template.replace('CLASSNAME', class_value)
prompts.append(new_string)
classes.append(class_value)
prompt_list.append(prompts)
classname_list.append(classes)
return prompt_list, classname_list
def unique_classnames(lst):
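    """Collapse a list of per-template classname lists into one list of
    unique lowercase classnames per class position."""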
classnum = len(lst[0])
unique_lst = [[] for n in range(classnum)]
for classnames in lst:
for i, classname in enumerate(classnames):
            classname = classname.lower()  # normalize the classname to lowercase
            if classname not in unique_lst[i]:  # check whether this classname is already recorded for position i
                unique_lst[i].append(classname)  # add it if not present
return unique_lst
def rand():
return torch.randn((2,2))
if __name__ == "__main__":
# extract_patch_dataset_embeddings(data_path='/ailab/group/pjlab-medai/zhouxiao/pathology/data/patch_level_data/patch_classification/Patchcamelyon/valid/images',
# model_path="/ailab/group/pjlab-medai/zhouxiao/pathology/model/conch/pytorch_model.bin",
# save_path='/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/patchCamelyon/valid',
# device='cuda:0')
# print('done')
# ens_feat_label_pt(pt_dir_path='/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/patchCamelyon/test',
# csv_path='/ailab/group/pjlab-medai/zhouxiao/pathology/data/patch_level_data/patch_classification/Patchcamelyon/test/labels.csv',
# save_path='/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/patchCamelyon',
# save_data_name='test',
# save_label_name='test_label')
test_idxs = divide_gbmlgg_dataset(gbm_dir_path='/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/GBM_test/h5_files',
lgg_dir_path='/ailab/group/pjlab-medai/zhouxiao/pathology/features/conch/LGG_test/h5_files',
label_csv='/ailab/group/pjlab-medai/zhouxiao/pathology/data/tcga_WSI_data/tcga_brain_test.csv')
print(test_idxs)