Fix test results
parent 8209b13e08
commit ba8aaea01b
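Summary of changes: replaces the ad-hoc print() calls in the test script with logger.info(), wires the root logger to both the console (timestamped) and a fresh test.log file, adds a fallback branch for unrecognized dataset_type values, logs the exam name and sample numbers alongside the test metrics, globs every KFold_* fold instead of only KFold_0, and re-enables the per-fold "test" evaluation alongside "all_test".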
@@ -7,7 +7,7 @@
 @email:2021022362@m.scnu.edu.cn
 @time:2022/02/21
 """
-
+import logging
 import os
 import sys
 
@@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
 columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
             "acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
 
+logging.getLogger('matplotlib.font_manager').disabled = True
+logging.getLogger('matplotlib.ticker').disabled = True
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+ch = logging.StreamHandler()
+ch.setLevel(logging.INFO)
+ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
+logger.addHandler(ch)
+
+if (exam_path / "test.log").exists():
+    (exam_path / "test.log").unlink()
+fh = logging.FileHandler(exam_path / "test.log", mode='a')
+fh.setLevel(logging.INFO)
+fh.setFormatter(logging.Formatter("%(message)s"))
+logger.addHandler(fh)
+logger.info("------------------------------------")
+
 
 def set_environment(i):
     global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
         model, model_name, train_set, test_set
 
     output_path = all_output_path[i]
-    print(output_path)
+    logger.info(output_path)
     segments_results_save_path = (output_path / "segments_results")
     segments_results_save_path.mkdir(exist_ok=True)
     events_results_save_path = (output_path / "events_results")
@@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
         sampNo = train_set
     elif dataset_type == "all_test":
         sampNo = test_set
+    else:
+        sampNo = None
+        logger.info("出错了")
+
+    exam_name = Path("./").absolute().name
+
     test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
                                      segment_augment=my_segment_augment)
     test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
@@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
 
     test_loss /= len(test_loader)
     calc_metrics.compute()
-    print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
+    logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
+    logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
     calc_metrics.reset()
 
     df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
@@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
 
 def analysis_results(df_result, base_path, dataset_type, is_event=False):
     if df_result.empty:
-        print(base_path, dataset_type, "is_empty")
+        logger.info(base_path, dataset_type, "is_empty")
         return None
 
     (base_path / dataset_type).mkdir(exist_ok=True, parents=True)
@@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
 
 def confusionMatrix(df_analysis, base_path, dataset_type):
     if df_analysis is None:
-        print(base_path, dataset_type, "is None")
+        logger.info(base_path, dataset_type, "is None")
         return
 
     if df_analysis.empty:
-        print(base_path, dataset_type, "is_empty")
+        logger.info(base_path, dataset_type, "is_empty")
         return
     classes = ["normal", "SA"]
     (base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
@@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
                 df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
                                                           SP, EP, thresh_Pred2[SP]]], columns=columns),
                                            ignore_index=True)
-                if value > 30:
-                    print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
+                # if value > 30:
+                #     logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
             # 长度不够
             else:
                 df["thresh_Pred"][SP:EP] = 0
@@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
             df_event = df_event.append(pd.DataFrame(
                 [[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
                   (segment_no + 1) * 30, 0]], columns=columns),
                 ignore_index=True)
 
     df_all_event = df_all_event.append(df_event, ignore_index=True)
@@ -446,9 +470,9 @@ def segment_to_event(df_segment, dataset_type):
     # shap_values = explainer.shap_values()
 
 if __name__ == '__main__':
    all_output_path = list(exam_path.rglob("KFold_0"))
-    all_output_path = list(exam_path.rglob("KFold_0"))
+    all_output_path = list(exam_path.rglob("KFold_*"))
     for exam_index, test_exam_path in enumerate(all_output_path):
         # test_exam_path = exam_path / test_exam_path
         set_environment(exam_index)
-        # test_and_analysis_and_visual(dataset_type="test")
+        test_and_analysis_and_visual(dataset_type="test")
         test_and_analysis_and_visual(dataset_type="all_test")
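
For reference, the logging configuration this commit introduces reduces to the minimal, self-contained sketch below. `exam_path` stands in for the experiment directory the script derives elsewhere, so treat the names and paths here as illustrative rather than the script's exact values.

import logging
from pathlib import Path

exam_path = Path(".")  # illustrative; the real script computes this from the exam layout

logger = logging.getLogger()   # root logger, as in the commit
logger.setLevel(logging.INFO)

ch = logging.StreamHandler()   # console handler: timestamped messages
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
logger.addHandler(ch)

log_file = exam_path / "test.log"
if log_file.exists():          # delete any previous log so each run starts clean
    log_file.unlink()
fh = logging.FileHandler(log_file, mode='a')
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter("%(message)s"))  # bare messages in the file
logger.addHandler(fh)

logger.info("handlers attached; writing to %s", log_file)

One caveat when reading the diff: logging uses a printf-style API, so logger.info(a, b, c) is not a drop-in replacement for print(a, b, c); the extra arguments are treated as %-format parameters for the first, which must then contain matching placeholders.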
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
@ -446,9 +470,9 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
# shap_values = explainer.shap_values()
|
# shap_values = explainer.shap_values()
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
all_output_path = list(exam_path.rglob("KFold_0"))
|
all_output_path = list(exam_path.rglob("KFold_*"))
|
||||||
for exam_index, test_exam_path in enumerate(all_output_path):
|
for exam_index, test_exam_path in enumerate(all_output_path):
|
||||||
# test_exam_path = exam_path / test_exam_path
|
# test_exam_path = exam_path / test_exam_path
|
||||||
set_environment(exam_index)
|
set_environment(exam_index)
|
||||||
# test_and_analysis_and_visual(dataset_type="test")
|
test_and_analysis_and_visual(dataset_type="test")
|
||||||
test_and_analysis_and_visual(dataset_type="all_test")
|
test_and_analysis_and_visual(dataset_type="all_test")
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
@ -446,9 +470,9 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
# shap_values = explainer.shap_values()
|
# shap_values = explainer.shap_values()
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
all_output_path = list(exam_path.rglob("KFold_0"))
|
all_output_path = list(exam_path.rglob("KFold_*"))
|
||||||
for exam_index, test_exam_path in enumerate(all_output_path):
|
for exam_index, test_exam_path in enumerate(all_output_path):
|
||||||
# test_exam_path = exam_path / test_exam_path
|
# test_exam_path = exam_path / test_exam_path
|
||||||
set_environment(exam_index)
|
set_environment(exam_index)
|
||||||
# test_and_analysis_and_visual(dataset_type="test")
|
test_and_analysis_and_visual(dataset_type="test")
|
||||||
test_and_analysis_and_visual(dataset_type="all_test")
|
test_and_analysis_and_visual(dataset_type="all_test")
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
|||||||
|
|
||||||
def confusionMatrix(df_analysis, base_path, dataset_type):
|
def confusionMatrix(df_analysis, base_path, dataset_type):
|
||||||
if df_analysis is None:
|
if df_analysis is None:
|
||||||
print(base_path, dataset_type, "is None")
|
logger.info(base_path, dataset_type, "is None")
|
||||||
return
|
return
|
||||||
|
|
||||||
if df_analysis.empty:
|
if df_analysis.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return
|
return
|
||||||
classes = ["normal", "SA"]
|
classes = ["normal", "SA"]
|
||||||
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
|
||||||
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
|
||||||
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
SP, EP, thresh_Pred2[SP]]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
if value > 30:
|
# if value > 30:
|
||||||
print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
# logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
|
||||||
# 长度不够
|
# 长度不够
|
||||||
else:
|
else:
|
||||||
df["thresh_Pred"][SP:EP] = 0
|
df["thresh_Pred"][SP:EP] = 0
|
||||||
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
df_event = df_event.append(pd.DataFrame(
|
df_event = df_event.append(pd.DataFrame(
|
||||||
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
[[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
|
||||||
(segment_no + 1) * 30, 0]], columns=columns),
|
(segment_no + 1) * 30, 0]], columns=columns),
|
||||||
ignore_index=True)
|
ignore_index=True)
|
||||||
|
|
||||||
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
df_all_event = df_all_event.append(df_event, ignore_index=True)
|
||||||
|
|
||||||
@ -446,9 +470,9 @@ def segment_to_event(df_segment, dataset_type):
|
|||||||
# shap_values = explainer.shap_values()
|
# shap_values = explainer.shap_values()
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
all_output_path = list(exam_path.rglob("KFold_0"))
|
all_output_path = list(exam_path.rglob("KFold_*"))
|
||||||
for exam_index, test_exam_path in enumerate(all_output_path):
|
for exam_index, test_exam_path in enumerate(all_output_path):
|
||||||
# test_exam_path = exam_path / test_exam_path
|
# test_exam_path = exam_path / test_exam_path
|
||||||
set_environment(exam_index)
|
set_environment(exam_index)
|
||||||
# test_and_analysis_and_visual(dataset_type="test")
|
test_and_analysis_and_visual(dataset_type="test")
|
||||||
test_and_analysis_and_visual(dataset_type="all_test")
|
test_and_analysis_and_visual(dataset_type="all_test")
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
@email:2021022362@m.scnu.edu.cn
|
@email:2021022362@m.scnu.edu.cn
|
||||||
@time:2022/02/21
|
@time:2022/02/21
|
||||||
"""
|
"""
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@ -69,13 +69,30 @@ columns = ["sampNo", "segmentNo", "label_type", "new_label", "SP", "EP", "pred"]
|
|||||||
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
columns2 = ["sampNo", "severity", "origin_P", "origin_N", "pred_P", "pred_N", "T", "F", "TP", "TN", "FP", "FN",
|
||||||
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
"acc", "recall", "spec", "pre", "NPV", "F1score", "support"]
|
||||||
|
|
||||||
|
logging.getLogger('matplotlib.font_manager').disabled = True
|
||||||
|
logging.getLogger('matplotlib.ticker').disabled = True
|
||||||
|
logger = logging.getLogger()
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
ch.setLevel(logging.INFO)
|
||||||
|
ch.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
|
||||||
|
logger.addHandler(ch)
|
||||||
|
|
||||||
|
if (exam_path / "test.log").exists():
|
||||||
|
(exam_path / "test.log").unlink()
|
||||||
|
fh = logging.FileHandler(exam_path / "test.log", mode='a')
|
||||||
|
fh.setLevel(logging.INFO)
|
||||||
|
fh.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
logger.addHandler(fh)
|
||||||
|
logger.info("------------------------------------")
|
||||||
|
|
||||||
|
|
||||||
def set_environment(i):
|
def set_environment(i):
|
||||||
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
global output_path, segments_results_save_path, events_results_save_path, model_path, label_path, data_path, \
|
||||||
model, model_name, train_set, test_set
|
model, model_name, train_set, test_set
|
||||||
|
|
||||||
output_path = all_output_path[i]
|
output_path = all_output_path[i]
|
||||||
print(output_path)
|
logger.info(output_path)
|
||||||
segments_results_save_path = (output_path / "segments_results")
|
segments_results_save_path = (output_path / "segments_results")
|
||||||
segments_results_save_path.mkdir(exist_ok=True)
|
segments_results_save_path.mkdir(exist_ok=True)
|
||||||
events_results_save_path = (output_path / "events_results")
|
events_results_save_path = (output_path / "events_results")
|
||||||
@ -102,6 +119,12 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
sampNo = train_set
|
sampNo = train_set
|
||||||
elif dataset_type == "all_test":
|
elif dataset_type == "all_test":
|
||||||
sampNo = test_set
|
sampNo = test_set
|
||||||
|
else:
|
||||||
|
sampNo = None
|
||||||
|
logger.info("出错了")
|
||||||
|
|
||||||
|
exam_name = Path("./").absolute().name
|
||||||
|
|
||||||
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
test_dataset = TestApneaDataset2(data_path, label_path, select_sampno=sampNo, dataset_type=dataset_type,
|
||||||
segment_augment=my_segment_augment)
|
segment_augment=my_segment_augment)
|
||||||
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
test_loader = DataLoader(test_dataset, batch_size=128, pin_memory=True, num_workers=0)
|
||||||
@ -142,7 +165,8 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
test_loss /= len(test_loader)
|
test_loss /= len(test_loader)
|
||||||
calc_metrics.compute()
|
calc_metrics.compute()
|
||||||
print(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
logger.info(f"EXAM_NAME: {exam_name} SampNO: {sampNo}")
|
||||||
|
logger.info(calc_metrics.get_matrix(loss=test_loss, epoch=0, epoch_type="test"))
|
||||||
calc_metrics.reset()
|
calc_metrics.reset()
|
||||||
|
|
||||||
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
df_segment["thresh_label"] = 1 * (df_segment["label_type"] > event_thresh).copy()
|
||||||
@ -177,7 +201,7 @@ def test_and_analysis_and_visual(dataset_type):
|
|||||||
|
|
||||||
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
def analysis_results(df_result, base_path, dataset_type, is_event=False):
|
||||||
if df_result.empty:
|
if df_result.empty:
|
||||||
print(base_path, dataset_type, "is_empty")
|
logger.info(base_path, dataset_type, "is_empty")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
(base_path / dataset_type).mkdir(exist_ok=True, parents=True)
|
||||||
@ -314,11 +338,11 @@ def analysis_results(df_result, base_path, dataset_type, is_event=False):

def confusionMatrix(df_analysis, base_path, dataset_type):
    if df_analysis is None:
        print(base_path, dataset_type, "is None")
        logger.info(f"{base_path} {dataset_type} is None")
        return

    if df_analysis.empty:
        print(base_path, dataset_type, "is_empty")
        logger.info(f"{base_path} {dataset_type} is_empty")
        return
    classes = ["normal", "SA"]
    (base_path / dataset_type / "confusionMatrix").mkdir(exist_ok=True, parents=True)
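For reference when reading the per-sample tables at the bottom of this page: a minimal sketch of how the reported columns derive from a binary confusion matrix over the two classes (normal vs. SA). The helper name and dict layout are assumptions, not code from this commit:

def binary_metrics(tp: int, tn: int, fp: int, fn: int) -> dict:
    # Standard binary-classification metrics as fractions in [0, 1],
    # with guards so an empty denominator yields 0.0 instead of raising.
    acc = (tp + tn) / (tp + tn + fp + fn)
    recall = tp / (tp + fn) if tp + fn else 0.0      # sensitivity (Rec)
    spec = tn / (tn + fp) if tn + fp else 0.0        # specificity (Spe)
    pre = tp / (tp + fp) if tp + fp else 0.0         # precision (Pre)
    npv = tn / (tn + fn) if tn + fn else 0.0         # negative predictive value
    f1 = 2 * pre * recall / (pre + recall) if pre + recall else 0.0
    return {"acc": acc, "recall": recall, "spec": spec,
            "pre": pre, "NPV": npv, "F1score": f1}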
@ -411,8 +435,8 @@ def segment_to_event(df_segment, dataset_type):
                df_event = df_event.append(pd.DataFrame([[int(sampNo), SP // 30, label_type, new_label,
                                                          SP, EP, thresh_Pred2[SP]]], columns=columns),
                                           ignore_index=True)
                if value > 30:
                # if value > 30:
                    print([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
                # logger.info([int(sampNo), SP // 30, label_type, new_label, SP, EP, thresh_Pred2[SP]])
            # not long enough
            else:
                df["thresh_Pred"][SP:EP] = 0
@ -426,7 +450,7 @@ def segment_to_event(df_segment, dataset_type):
            df_event = df_event.append(pd.DataFrame(
                [[int(sampNo), segment_no, df_temp["label_type"].max(), df_temp["new_label"].max(), segment_no * 30,
                  (segment_no + 1) * 30, 0]], columns=columns),
                ignore_index=True)

        df_all_event = df_all_event.append(df_event, ignore_index=True)
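The two hunks above merge 30 s segment predictions into events via the per-second thresh_Pred track (SP/EP are start/end seconds). For orientation, a minimal run-length sketch of that idea, assuming a 0/1 array with one entry per second; the function name, the min_len default, and the example are illustrative, not this repository's code:

import numpy as np

def preds_to_events(thresh_pred, min_len=10):
    # Collapse a per-second 0/1 prediction track into (SP, EP) events,
    # dropping runs shorter than min_len seconds ("not long enough").
    events = []
    sp = None
    for t, v in enumerate(thresh_pred):
        if v and sp is None:             # rising edge: an event starts
            sp = t
        elif not v and sp is not None:   # falling edge: the event ends
            if t - sp >= min_len:
                events.append((sp, t))
            sp = None
    if sp is not None and len(thresh_pred) - sp >= min_len:
        events.append((sp, len(thresh_pred)))  # event runs to the end
    return events

# preds_to_events(np.array([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0])) -> [(1, 11)]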
@ -5,9 +5,11 @@

-------------------------------------------------
000
learning rate 1e-4, 20 epochs: overfits too early (around epoch 6)
------------------------------------------------
001
learning rate 1e-5, 50 epochs: acceptable

-----------------------------------------------
002
learning rate 1e-6, 50 epochs: a reasonable fit, just too slow

@ -63,7 +65,34 @@

Besides subtracting the dataset mean, each segment's mixed signal is normalized with its own Z-score (see the sketch at the end of this section)
--------------------------------------------------------
013
learning rate 1e-5, epoch 50
BEST TEST learning rate 1e-5, epoch 50
        Acc     Rec     Pre     Spe     F1
all     84.56   85.53   65.01   84.22   73.87

EXAM_NAME: 013 SampNO: [582, 1000]
test epoch: 0 loss: 1.1915464037836785 lr: None
        Acc     Rec     Pre     Spe     F1
all     60.04   11.88   4.92    66.96   6.96

EXAM_NAME: 013 SampNO: [286, 1009]
test epoch: 0 loss: 0.6172940751263513 lr: None
        Acc     Rec     Pre     Spe     F1
all     77.71   61.15   62.02   84.55   61.58

EXAM_NAME: 013 SampNO: [282, 966]
test epoch: 0 loss: 1.210226387001914 lr: None
        Acc     Rec     Pre     Spe     F1
all     39.09   95.56   0.23    39.01   0.46

EXAM_NAME: 013 SampNO: [726, 1006]
test epoch: 0 loss: 0.603616701004974 lr: None
        Acc     Rec     Pre     Spe     F1
all     73.5    56.9    41.81   78.1    48.21

EXAM_NAME: 013 SampNO: [229, 1004]
test epoch: 0 loss: 0.3528211629552555 lr: None
        Acc     Rec     Pre     Spe     F1
all     84.68   17.63   4.82    87.15   7.56
--------------------------------------------------------
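The Z-score note above, as a runnable sketch: a per-segment standardization of the mixed signal after removing the dataset-wide mean. normalize_segment and dataset_mean are hypothetical names, since the preprocessing code itself is not shown in this commit:

import numpy as np

def normalize_segment(segment: np.ndarray, dataset_mean: float) -> np.ndarray:
    # Remove the dataset-wide mean, then Z-score this one segment so each
    # segment of the mixed signal ends up with zero mean and unit variance.
    centered = segment - dataset_mean
    std = centered.std()
    if std == 0:          # flat segment: avoid dividing by zero
        return centered - centered.mean()
    return (centered - centered.mean()) / std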