Compare commits
No commits in common. "da9dc81248a106123224732146b8b84f0bed105a" and "abc6526143033b1764524451687bec589df668e7" have entirely different histories.
da9dc81248
...
abc6526143
25 changed files with 1 additions and 1121 deletions
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -162,7 +162,3 @@ cython_debug/
|
|||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
.venv/
|
||||
.idea/
|
||||
.ipynb_checkpoints/
|
||||
Data/
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
import openpyxl
import csv
import os.path
import glob


def read_value(patient_dir, atlas, data_type):
    """Read one patient's per-region mean values from its result_atlas.csv.

    Returns a worksheet row: [patient folder name, mean_1, mean_2, ...],
    one float per region (taken from the "meanValue" column).
    """
    folder_name = os.path.basename(patient_dir)
    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    result = [folder_name]
    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            result.append(float(row["meanValue"]))

    return result


def read_header(patient_dir, atlas, data_type):
    """Build the sheet header row: ['编号'] followed by the region names.

    Region names come from the "Chinese Name" column of the same CSV that
    read_value() reads, so header and data columns stay aligned.
    """
    header = ['编号']

    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            header.append(row["Chinese Name"])

    return header


def write_to_work_sheet(work_sheet, patient_list, atlas, data_type):
    """Write a header row plus one data row per patient into work_sheet.

    No-op when patient_list is empty. The header is taken from the first
    patient; all patients are assumed to share the same region layout.
    """
    if not patient_list:
        return

    work_sheet.append(read_header(patient_list[0], atlas, data_type))

    for patient_dir in patient_list:
        print(data_type, atlas, patient_dir)
        work_sheet.append(read_value(patient_dir, atlas, data_type))


def main():
    # Data types to export.  TODO (original exercise note): add a VOL type and
    # extend read_value/read_header to support reading volumes as well.
    data_types = ['corr-CBF', 'ATT', 'ACBV', 'CBF1', 'CBF2', 'CBF3', 'CBF4', 'CBF5']
    # Atlases to export.
    atlas_list = ['AnImage_WholeBrain', 'AnImage_BrainLobes', 'AnImage_AAL3']
    # Output location.
    output_file = r'..\Data\csv-extract.xlsx'
    # Input patient folders (one folder per patient).
    patient_list = glob.glob(r'..\Data\csv-data\*')
    # Create a new Excel workbook and drop the default empty worksheet.
    work_book = openpyxl.Workbook()
    work_book.remove(work_book.active)

    for data_type in data_types:
        for atlas in atlas_list:
            work_sheet = work_book.create_sheet(title=f'{atlas}_{data_type}')
            write_to_work_sheet(work_sheet, patient_list, atlas, data_type)

    # Save the workbook to the xlsx file.
    work_book.save(output_file)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
import SimpleITK as sitk


def command_iteration(method):
    """Progress callback: print iteration number, metric value and optimizer position."""
    print(f"{method.GetOptimizerIteration()} = {method.GetMetricValue():.5f} {method.GetOptimizerPosition()}")


def registration(fixed_image_path, moving_image_path, new_image_path):
    """Rigidly register the moving image onto the fixed image and save the result.

    Uses mutual information with a regular-step gradient-descent optimizer and a
    centered Euler 3D (rigid) initial transform; the registered moving image is
    resampled onto the fixed image's grid and written to new_image_path.
    """
    fixed_image = sitk.ReadImage(fixed_image_path, sitk.sitkFloat32)
    moving_image = sitk.ReadImage(moving_image_path, sitk.sitkFloat32)

    registration_method = sitk.ImageRegistrationMethod()

    # Joint-histogram mutual information on a regular 20% sample;
    # 42 is the sampling seed, so runs are reproducible.
    registration_method.SetMetricAsJointHistogramMutualInformation(numberOfHistogramBins=50)
    registration_method.SetMetricSamplingStrategy(registration_method.REGULAR)
    registration_method.SetMetricSamplingPercentage(0.2, 42)
    # Single-threaded metric evaluation keeps results deterministic.
    registration_method.SetGlobalDefaultNumberOfThreads(1)
    registration_method.SetOptimizerScalesFromPhysicalShift()

    registration_method.SetInterpolator(sitk.sitkLinear)
    registration_method.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(registration_method))

    registration_method.SetOptimizerAsRegularStepGradientDescent(minStep=0.0001,
                                                                 learningRate=1.0,
                                                                 numberOfIterations=400,
                                                                 gradientMagnitudeTolerance=1e-8)

    # Initialize with a centered rigid (Euler 3D) transform.
    initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.Euler3DTransform())
    registration_method.SetInitialTransform(initial_transform, inPlace=True)

    # Run the registration.
    final_transform_v4 = registration_method.Execute(fixed_image, moving_image)

    # Print why the optimizer stopped (converged vs. iteration limit).
    stop_condition = registration_method.GetOptimizerStopConditionDescription()
    print(stop_condition)

    # Resample the moving image into the fixed image's space with the final transform.
    moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform_v4, sitk.sitkLinear, 0.0,
                                     moving_image.GetPixelID())

    sitk.WriteImage(moving_resampled, new_image_path)


def main():
    fixed_image_path = r"..\Data\registration\t1.nii.gz"
    moving_image_path = r"..\Data\registration\t2 flair.nii.gz"
    new_image_path = r"..\Data\registration\t2 flair_resampled.nii.gz"

    registration(fixed_image_path, moving_image_path, new_image_path)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,93 +0,0 @@
|
|||
import openpyxl
import csv
import os.path
import glob


def read_value(patient_dir, atlas, data_type):
    """Read one patient's per-region mean values from its result_atlas.csv.

    Returns a worksheet row: [patient folder name, mean_1, mean_2, ...].
    """
    folder_name = os.path.basename(patient_dir)
    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    result = [folder_name]
    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            result.append(float(row["meanValue"]))

    return result


def read_header(patient_dir, atlas, data_type):
    """Build the sheet header row: ['编号'] followed by the region names."""
    header = ['编号']

    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            header.append(row["Chinese Name"])

    return header


def generate_stats_row(name, region_size, function, patient_size):
    """Build a row of Excel formulas applying `function` to each data column.

    Each formula covers rows 2..patient_size+1 (the patient data rows) of the
    columns B onward (one per region).
    """
    result = [name]

    for i in range(region_size):
        letter = openpyxl.utils.cell.get_column_letter(i + 2)
        result.append(f'={function}({letter}2:{letter}{patient_size + 1})')

    return result


def write_to_work_sheet(work_sheet, patient_list, atlas, data_type):
    """Write header, one data row per patient, then summary-statistics rows."""
    if not patient_list:
        return

    header = read_header(patient_list[0], atlas, data_type)
    work_sheet.append(header)

    for patient_dir in patient_list:
        print(data_type, patient_dir)
        work_sheet.append(read_value(patient_dir, atlas, data_type))

    region_size = len(header) - 1
    # Two blank spacer rows between the data and the statistics block.
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append(generate_stats_row('均值', region_size, 'AVERAGE', len(patient_list)))
    work_sheet.append(generate_stats_row('标准差', region_size, 'STDEV', len(patient_list)))
    work_sheet.append(generate_stats_row('最小值', region_size, 'MIN', len(patient_list)))
    # TODO (original exercise note): fill these blank rows with the
    # 5% 25% 50% 75% 95% percentiles using Excel's PERCENTILE function.
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append(generate_stats_row('最大值', region_size, 'MAX', len(patient_list)))


def main():
    data_types = ['corr-CBF', 'ATT', 'ACBV', 'CBF1', 'CBF2', 'CBF3', 'CBF4', 'CBF5']
    atlas_list = ['AnImage_WholeBrain', 'AnImage_BrainLobes', 'AnImage_AAL3']

    output_file = r'..\Data\csv-simple-stat.xlsx'

    patient_list = glob.glob(r'..\Data\csv-data\*')

    work_book = openpyxl.Workbook()
    work_book.remove(work_book.active)

    for data_type in data_types:
        for atlas in atlas_list:
            work_sheet = work_book.create_sheet(title=f'{atlas}_{data_type}')
            write_to_work_sheet(work_sheet, patient_list, atlas, data_type)

    work_book.save(output_file)


if __name__ == '__main__':
    main()
|
||||
104
Code/3-group.py
104
Code/3-group.py
|
|
@ -1,104 +0,0 @@
|
|||
import openpyxl
import csv
import os.path


def read_value(patient_dir, atlas, data_type):
    """Read one patient's per-region mean values from its result_atlas.csv.

    Returns a worksheet row: [patient folder name, mean_1, mean_2, ...].
    """
    folder_name = os.path.basename(patient_dir)
    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    result = [folder_name]
    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            result.append(float(row["meanValue"]))

    return result


def read_header(patient_dir, atlas, data_type):
    """Build the sheet header row: ['编号'] followed by the region names."""
    header = ['编号']

    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            header.append(row["Chinese Name"])

    return header


def generate_stats_row(name, region_size, function, patient_size, extra_parameter):
    """Build a row of Excel formulas applying `function` to each data column.

    extra_parameter is appended verbatim inside the call, e.g. ',0.05' for
    PERCENTILE; pass '' for single-argument functions.
    """
    result = [name]

    for i in range(region_size):
        letter = openpyxl.utils.cell.get_column_letter(i + 2)
        result.append(f'={function}({letter}2:{letter}{patient_size + 1}{extra_parameter})')

    return result


def write_to_work_sheet(work_sheet, patient_list, atlas, data_type):
    """Write header, one data row per patient, then summary-statistics rows."""
    if not patient_list:
        return

    header = read_header(patient_list[0], atlas, data_type)
    work_sheet.append(header)

    for patient_dir in patient_list:
        print(data_type, patient_dir)
        work_sheet.append(read_value(patient_dir, atlas, data_type))

    region_size = len(header) - 1
    # Two blank spacer rows between the data and the statistics block.
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append(generate_stats_row('均值', region_size, 'AVERAGE', len(patient_list), ''))
    work_sheet.append(generate_stats_row('标准差', region_size, 'STDEV', len(patient_list), ''))
    work_sheet.append(generate_stats_row('最小值', region_size, 'MIN', len(patient_list), ''))
    work_sheet.append(generate_stats_row('5%', region_size, 'PERCENTILE', len(patient_list), ',0.05'))
    work_sheet.append(generate_stats_row('25%', region_size, 'PERCENTILE', len(patient_list), ',0.25'))
    work_sheet.append(generate_stats_row('50%', region_size, 'PERCENTILE', len(patient_list), ',0.50'))
    work_sheet.append(generate_stats_row('75%', region_size, 'PERCENTILE', len(patient_list), ',0.75'))
    work_sheet.append(generate_stats_row('95%', region_size, 'PERCENTILE', len(patient_list), ',0.95'))
    work_sheet.append(generate_stats_row('最大值', region_size, 'MAX', len(patient_list), ''))


def main():
    data_types = ['corr-CBF', "ATT"]
    atlas_list = ['AnImage_WholeBrain', 'AnImage_BrainLobes', 'AnImage_AAL3']

    output_file = r'..\Data\csv-group.xlsx'
    group_file = r'..\Data\group.xlsx'
    patient_root = r'..\Data\csv-data'

    groups = {}
    # Read the grouping information: one worksheet per group, one patient id
    # per row in column A.
    group_workbook = openpyxl.load_workbook(group_file)
    for sheet_name in group_workbook.sheetnames:
        group = []
        group_sheet = group_workbook[sheet_name]
        for i in range(1, group_sheet.max_row + 1):
            patient_id = group_sheet.cell(row=i, column=1).value
            # Skip blank trailing rows; os.path.join would raise on None.
            if not patient_id:
                continue
            group.append(os.path.join(patient_root, patient_id))
        groups[sheet_name] = group

    work_book = openpyxl.Workbook()
    work_book.remove(work_book.active)

    for group_key in groups.keys():
        for data_type in data_types:
            for atlas in atlas_list:
                work_sheet = work_book.create_sheet(title=f'{group_key}_{atlas}_{data_type}')
                write_to_work_sheet(work_sheet, groups[group_key], atlas, data_type)

    work_book.save(output_file)


if __name__ == '__main__':
    main()
|
||||
184
Code/4-ANOVA.py
184
Code/4-ANOVA.py
|
|
@ -1,184 +0,0 @@
|
|||
import openpyxl
import csv
import os.path


def read_value(patient_dir, atlas, data_type):
    """Read one patient's per-region mean values from its result_atlas.csv.

    Returns a worksheet row: [patient folder name, mean_1, mean_2, ...].
    """
    folder_name = os.path.basename(patient_dir)
    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    result = [folder_name]
    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            result.append(float(row["meanValue"]))

    return result


def read_header(patient_dir, atlas, data_type):
    """Build the sheet header row: ['编号'] followed by the region names."""
    header = ['编号']

    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            header.append(row["Chinese Name"])

    return header


def generate_stats_row(name, region_size, function, patient_size, extra_parameter):
    """Build a row of Excel formulas applying `function` to each data column.

    extra_parameter is appended verbatim inside the call, e.g. ',0.05' for
    PERCENTILE; pass '' for single-argument functions.
    """
    result = [name]

    for i in range(region_size):
        letter = openpyxl.utils.cell.get_column_letter(i + 2)
        result.append(f'={function}({letter}2:{letter}{patient_size + 1}{extra_parameter})')

    return result


def write_to_work_sheet(work_sheet, patient_list, atlas, data_type):
    """Write header, one data row per patient, then summary-statistics rows."""
    if not patient_list:
        return

    header = read_header(patient_list[0], atlas, data_type)
    work_sheet.append(header)

    for patient_dir in patient_list:
        print(data_type, patient_dir)
        work_sheet.append(read_value(patient_dir, atlas, data_type))

    region_size = len(header) - 1
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append(generate_stats_row('均值', region_size, 'AVERAGE', len(patient_list), ''))
    work_sheet.append(generate_stats_row('标准差', region_size, 'STDEV', len(patient_list), ''))
    work_sheet.append(generate_stats_row('最小值', region_size, 'MIN', len(patient_list), ''))
    work_sheet.append(generate_stats_row('5%', region_size, 'PERCENTILE', len(patient_list), ',0.05'))
    work_sheet.append(generate_stats_row('25%', region_size, 'PERCENTILE', len(patient_list), ',0.25'))
    work_sheet.append(generate_stats_row('50%', region_size, 'PERCENTILE', len(patient_list), ',0.50'))
    work_sheet.append(generate_stats_row('75%', region_size, 'PERCENTILE', len(patient_list), ',0.75'))
    work_sheet.append(generate_stats_row('95%', region_size, 'PERCENTILE', len(patient_list), ',0.95'))
    work_sheet.append(generate_stats_row('最大值', region_size, 'MAX', len(patient_list), ''))


def write_sum(start_index, step, count, row_num):
    """Build an Excel formula summing every `step`-th column from start_index on row row_num.

    E.g. write_sum(15, 5, 3, 4) -> '=O4+T4+Y4'.
    """
    letters = (openpyxl.utils.cell.get_column_letter(start_index + i * step) for i in range(count))
    return '=' + '+'.join(f'{letter}{row_num}' for letter in letters)


def write_anova(workbook, work_sheet, groups, atlas, data_type):
    """Write a one-way-ANOVA worksheet built entirely from Excel formulas.

    Columns A..N hold the pooled statistics (N, AVG, STD, sums of squares,
    mean squares, F, p via FDIST); after them come 5 columns per group.
    The per-group columns start at column 15 (O), hence the write_sum offsets
    15 (per-group N) / 18 (BSS) / 19 (WSS) with stride 5.
    """
    # Row 1: group-name banner over each 5-column group section.
    header_group = [""] * 14
    for group_name in groups.keys():
        header_group += [group_name] * 5
    work_sheet.append(header_group)

    # Row 2: Chinese column captions.
    header_cn = ['', '样本数量', '平均值', '标准差', '总平方和', '组间平方和', '组内平方和', '检验', '组间均方',
                 '自由度', '组内均方', '自由度', '', '']
    for _ in groups.keys():
        header_cn += ['样本数量', '平均值', '标准差', '组间平方和', '组内平方和']
    work_sheet.append(header_cn)

    # Row 3: English column captions.
    header_en = ['', 'N', 'AVG', 'STD', 'TSS', 'BSS', 'WSS', 'check', 'BMSS', 'df', 'WMSS', 'df', 'F', 'p']
    for _ in groups.keys():
        header_en += ['N', 'AVG', 'STD', 'BSS', 'WSS']
    work_sheet.append(header_en)

    # Map each group to its already-written data sheet.
    sheet_dict = {}
    all_group_count = 0
    for group_name, group in groups.items():
        sheet_dict[group_name] = f'{group_name}_{atlas}_{data_type}'
        all_group_count += len(group)
    all_group_sheet_name = f'ALL_{atlas}_{data_type}'

    first_group_sheet = workbook[list(sheet_dict.values())[0]]
    for column_num in range(2, first_group_sheet.max_column + 1):
        column_letter = openpyxl.utils.cell.get_column_letter(column_num)
        # Data rows start at row 4 (after the three header rows), so source
        # column n lands on destination row n + 2.
        curr_row_number = column_num + 2
        data_row = [first_group_sheet.cell(row=1, column=column_num).value]
        # B: pooled N = sum of the per-group N cells.
        data_row += [write_sum(15, 5, len(sheet_dict), curr_row_number)]
        data_row += [f'=AVERAGE(\'{all_group_sheet_name}\'!{column_letter}2:{column_letter}{all_group_count + 1})']
        data_row += [f'=STDEV(\'{all_group_sheet_name}\'!{column_letter}2:{column_letter}{all_group_count + 1})']
        # E: total sum of squares over the pooled sample.
        data_row += [f'=DEVSQ(\'{all_group_sheet_name}\'!{column_letter}2:{column_letter}{all_group_count + 1})']
        # F/G: between- and within-group sums of squares (sums of per-group cells).
        data_row += [write_sum(18, 5, len(sheet_dict), curr_row_number)]
        data_row += [write_sum(19, 5, len(sheet_dict), curr_row_number)]
        # H: sanity check — TSS - BSS - WSS should be ~0.
        data_row += [f'=E{curr_row_number}-F{curr_row_number}-G{curr_row_number}']
        # I: between-group mean square; J: its degrees of freedom (k - 1).
        data_row += [f'=F{curr_row_number}/J{curr_row_number}']
        data_row += [len(groups) - 1]
        # K: within-group mean square; L: its degrees of freedom (N - k... see B/J).
        data_row += [f'=G{curr_row_number}/L{curr_row_number}']
        data_row += [f'=B{curr_row_number}-J{curr_row_number}-1']
        # M: F statistic; N: p value from the F distribution.
        data_row += [f'=I{curr_row_number}/K{curr_row_number}']
        data_row += [f'=FDIST(M{curr_row_number}, J{curr_row_number}, L{curr_row_number})']

        # Per-group sections: N, AVG, STD, BSS contribution, WSS contribution.
        for group_name, sheet_name in sheet_dict.items():
            data_count = len(groups[group_name])
            current_column_num = len(data_row) + 1
            count_letter = openpyxl.utils.cell.get_column_letter(current_column_num)
            avg_letter = openpyxl.utils.cell.get_column_letter(current_column_num + 1)
            data_row += [f'=COUNTA(\'{sheet_name}\'!{column_letter}2:{column_letter}{data_count + 1})']
            data_row += [f'=AVERAGE(\'{sheet_name}\'!{column_letter}2:{column_letter}{data_count + 1})']
            data_row += [f'=STDEV(\'{sheet_name}\'!{column_letter}2:{column_letter}{data_count + 1})']
            data_row += [f'={count_letter}{curr_row_number}*({avg_letter}{curr_row_number}-C{curr_row_number})^2']
            data_row += [f'=DEVSQ(\'{sheet_name}\'!{column_letter}2:{column_letter}{data_count + 1})']

        work_sheet.append(data_row)


def main():
    data_types = ['corr-CBF']
    atlas_list = ['AnImage_AAL3']

    output_file = r'..\Data\csv-ANOVA.xlsx'
    group_file = r'..\Data\group.xlsx'
    patient_root = r'..\Data\csv-data'

    groups = {}

    # Read the grouping information: one worksheet per group, one id per row.
    group_workbook = openpyxl.load_workbook(group_file)
    for sheet_name in group_workbook.sheetnames:
        group = []
        group_sheet = group_workbook[sheet_name]
        for i in range(1, group_sheet.max_row + 1):
            patient_id = group_sheet.cell(row=i, column=1).value
            # Skip blank trailing rows; os.path.join would raise on None.
            if not patient_id:
                continue
            group.append(os.path.join(patient_root, patient_id))
        groups[sheet_name] = group

    # Pooled list of every patient across all groups.
    all_group = []
    for group in groups.values():
        all_group += group

    work_book = openpyxl.Workbook()
    work_book.remove(work_book.active)

    # First pass: raw data + stats sheets per group, plus the pooled ALL sheet.
    for data_type in data_types:
        for atlas in atlas_list:
            atlas_new_name = atlas.replace("AnImage_", "")
            for group_key in groups.keys():
                work_sheet = work_book.create_sheet(title=f'{group_key}_{atlas_new_name}_{data_type}')
                write_to_work_sheet(work_sheet, groups[group_key], atlas, data_type)

            work_sheet = work_book.create_sheet(title=f'ALL_{atlas_new_name}_{data_type}')
            write_to_work_sheet(work_sheet, all_group, atlas, data_type)

    # Second pass: the ANOVA sheets referencing the sheets written above.
    for data_type in data_types:
        for atlas in atlas_list:
            atlas_new_name = atlas.replace("AnImage_", "")
            work_sheet = work_book.create_sheet(title=f'ANOVA_{atlas_new_name}_{data_type}')
            write_anova(work_book, work_sheet, groups, atlas_new_name, data_type)

    work_book.save(output_file)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,150 +0,0 @@
|
|||
import numpy as np
import openpyxl
import csv
import os.path
from scipy.stats import f_oneway


def read_value(patient_dir, atlas, data_type):
    """Read one patient's per-region mean values from its result_atlas.csv.

    Returns a worksheet row: [patient folder name, mean_1, mean_2, ...].
    """
    folder_name = os.path.basename(patient_dir)
    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    result = [folder_name]
    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            result.append(float(row["meanValue"]))

    return result


def read_header(patient_dir, atlas, data_type):
    """Build the sheet header row: ['编号'] followed by the region names."""
    header = ['编号']

    csv_file_path = os.path.join(patient_dir, f"{data_type}_{atlas}_result", "result_atlas.csv")

    with open(csv_file_path, mode='r') as data_csv_file:
        for row in csv.DictReader(data_csv_file):
            header.append(row["Chinese Name"])

    return header


def generate_stats_row(name, region_size, function, patient_size, extra_parameter):
    """Build a row of Excel formulas applying `function` to each data column.

    extra_parameter is appended verbatim inside the call, e.g. ',0.05' for
    PERCENTILE; pass '' for single-argument functions.
    """
    result = [name]

    for i in range(region_size):
        letter = openpyxl.utils.cell.get_column_letter(i + 2)
        result.append(f'={function}({letter}2:{letter}{patient_size + 1}{extra_parameter})')

    return result


def write_to_work_sheet(work_sheet, patient_list, atlas, data_type):
    """Write header, one data row per patient, then summary-statistics rows."""
    if not patient_list:
        return

    header = read_header(patient_list[0], atlas, data_type)
    work_sheet.append(header)

    for patient_dir in patient_list:
        print(data_type, patient_dir)
        work_sheet.append(read_value(patient_dir, atlas, data_type))

    region_size = len(header) - 1
    work_sheet.append([])
    work_sheet.append([])
    work_sheet.append(generate_stats_row('均值', region_size, 'AVERAGE', len(patient_list), ''))
    work_sheet.append(generate_stats_row('标准差', region_size, 'STDEV', len(patient_list), ''))
    work_sheet.append(generate_stats_row('最小值', region_size, 'MIN', len(patient_list), ''))
    work_sheet.append(generate_stats_row('5%', region_size, 'PERCENTILE', len(patient_list), ',0.05'))
    work_sheet.append(generate_stats_row('25%', region_size, 'PERCENTILE', len(patient_list), ',0.25'))
    work_sheet.append(generate_stats_row('50%', region_size, 'PERCENTILE', len(patient_list), ',0.50'))
    work_sheet.append(generate_stats_row('75%', region_size, 'PERCENTILE', len(patient_list), ',0.75'))
    work_sheet.append(generate_stats_row('95%', region_size, 'PERCENTILE', len(patient_list), ',0.95'))
    work_sheet.append(generate_stats_row('最大值', region_size, 'MAX', len(patient_list), ''))


def write_anova(workbook, work_sheet, groups, atlas, data_type):
    """Run a one-way ANOVA per region with scipy.stats.f_oneway.

    Reads each group's values straight out of the already-written group
    sheets and writes one row per region: [region name, F, p].
    """
    header = ['', 'F', 'p']
    work_sheet.append(header)

    sheet_dict = {}
    for group_name, group in groups.items():
        sheet_dict[group_name] = f'{group_name}_{atlas}_{data_type}'

    first_group_sheet = workbook[list(sheet_dict.values())[0]]
    for column_num in range(2, first_group_sheet.max_column + 1):
        data_list = []
        data_row = [first_group_sheet.cell(row=1, column=column_num).value]

        for group_name, sheet_name in sheet_dict.items():
            group_data = []
            data_count = len(groups[group_name])
            for i in range(2, data_count + 2):
                data_value = workbook[sheet_name].cell(row=i, column=column_num).value
                # Guard against empty cells (None) before the NaN check —
                # np.isnan(None) raises TypeError.
                if data_value is not None and not np.isnan(data_value):
                    group_data.append(data_value)
            data_list.append(group_data)

        # Skip regions with no usable data in the first group.
        if len(data_list[0]) == 0:
            continue

        f_value, p_value = f_oneway(*data_list)
        data_row += [f_value, p_value]
        work_sheet.append(data_row)


def main():
    data_types = ['corr-CBF']
    atlas_list = ['AnImage_AAL3']

    output_file = r'..\Data\csv-ANOVA-SciPy.xlsx'
    group_file = r'..\Data\group.xlsx'
    patient_root = r'..\Data\csv-data'

    groups = {}

    # Read the grouping information: one worksheet per group, one id per row.
    group_workbook = openpyxl.load_workbook(group_file)
    for sheet_name in group_workbook.sheetnames:
        group = []
        group_sheet = group_workbook[sheet_name]
        for i in range(1, group_sheet.max_row + 1):
            patient_id = group_sheet.cell(row=i, column=1).value
            # Skip blank trailing rows; os.path.join would raise on None.
            if not patient_id:
                continue
            group.append(os.path.join(patient_root, patient_id))
        groups[sheet_name] = group

    # Pooled list of every patient across all groups.
    all_group = []
    for group in groups.values():
        all_group += group

    work_book = openpyxl.Workbook()
    work_book.remove(work_book.active)

    # First pass: raw data + stats sheets per group, plus the pooled ALL sheet.
    for data_type in data_types:
        for atlas in atlas_list:
            atlas_new_name = atlas.replace("AnImage_", "")
            for group_key in groups.keys():
                work_sheet = work_book.create_sheet(title=f'{group_key}_{atlas_new_name}_{data_type}')
                write_to_work_sheet(work_sheet, groups[group_key], atlas, data_type)

            work_sheet = work_book.create_sheet(title=f'ALL_{atlas_new_name}_{data_type}')
            write_to_work_sheet(work_sheet, all_group, atlas, data_type)

    # Second pass: SciPy ANOVA sheets reading the sheets written above.
    for data_type in data_types:
        for atlas in atlas_list:
            atlas_new_name = atlas.replace("AnImage_", "")
            work_sheet = work_book.create_sheet(title=f'ANOVA_{atlas_new_name}_{data_type}')
            write_anova(work_book, work_sheet, groups, atlas_new_name, data_type)

    work_book.save(output_file)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
import numpy as np
import openpyxl
import os.path
import matplotlib.pyplot as plt


def draw_cbf_graph(region, data, save_path):
    """Draw a dark-themed bar chart of CBF at each PLD (plus corrected CBF) for one region.

    `data` is the sequence of means in the same order as the x labels below;
    the last (corrected-CBF) bar is highlighted in blue.
    """
    bar_x = np.array(['0.5', '1', '1.5', '2', '2.5', 'cCBF'])
    bar_y = np.array(data)
    color = ['#4CAF50', '#4CAF50', '#4CAF50', '#4CAF50', '#4CAF50', '#478FC1']

    # White-on-black theme for the whole figure.
    font_color = '#FFFFFF'
    plt.rcParams['text.color'] = font_color
    plt.rcParams['axes.labelcolor'] = font_color
    plt.rcParams['xtick.color'] = font_color
    plt.rcParams['ytick.color'] = font_color

    plt.figure(facecolor='#000000')
    ax = plt.axes()
    ax.set_facecolor('#000000')
    ax.spines['left'].set_color(font_color)
    ax.spines['right'].set_color(font_color)
    ax.spines['bottom'].set_color(font_color)
    ax.spines['top'].set_color(font_color)

    plt.rcParams["font.sans-serif"] = ["SimHei"]  # font that can render Chinese labels
    plt.bar(bar_x, bar_y, width=0.5, color=color)

    # Print each bar's value just inside its top.
    for i in range(len(bar_x)):
        plt.text(i - 0.2, bar_y[i] - 2, f"{bar_y[i]:.2f}")

    plt.title(f"221例健康人西门子5延迟数据矫正前后{region}CBF对比")
    plt.xlabel("PLD")
    plt.ylabel("CBF")
    plt.savefig(save_path, dpi=200)
    # Release the figure so repeated calls don't accumulate memory.
    plt.cla()
    plt.close('all')


def main():
    data_types = ['CBF1', 'CBF2', 'CBF3', 'CBF4', 'CBF5', 'corr-CBF']
    # TODO (original exercise note): add the 'AnImage_AAL3' atlas and fix the
    # error that appears.
    atlas_list = ['AnImage_WholeBrain', 'AnImage_BrainLobes']
    output_dir = r'..\Data\cbf-graph'
    os.makedirs(output_dir, exist_ok=True)

    input_file = r'..\Data\csv-simple-stat.xlsx'
    # data_only=True reads the cached values of the stat formulas, not the formulas.
    workbook = openpyxl.load_workbook(input_file, data_only=True)

    for atlas in atlas_list:
        cbf_mean_dict = {}
        for data_type in data_types:
            worksheet = workbook[f'{atlas}_{data_type}']
            for column_num in range(2, worksheet.max_column + 1):
                name = worksheet.cell(row=1, column=column_num).value
                # max_row - 8 is the '均值' (AVERAGE) row given the fixed
                # 11-row statistics block written by the stat script —
                # NOTE(review): verify if that layout ever changes.
                mean = worksheet.cell(row=worksheet.max_row - 8, column=column_num).value
                print(atlas, data_type, name, mean)

                if name not in cbf_mean_dict:
                    cbf_mean_dict[name] = []
                cbf_mean_dict[name].append(mean)

        for region, data in cbf_mean_dict.items():
            draw_cbf_graph(region, data, os.path.join(output_dir, f'{region}.png'))


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,50 +0,0 @@
|
|||
import openpyxl
import os.path
import matplotlib.pyplot as plt


def draw_box_graph(region, data, groups, data_type, save_path):
    """Draw one box plot per group for a single region and save it to save_path.

    `data` is a list of value lists, one per entry of `groups` (the x labels).
    """
    plt.rcParams["font.sans-serif"] = ["SimHei"]  # font that can render Chinese labels
    plt.boxplot(data,
                patch_artist=True,  # fill the boxes with color
                labels=groups)

    plt.title(f"各个分组{region}{data_type}对比")
    plt.xlabel("分组")
    plt.ylabel(data_type)
    plt.savefig(save_path, dpi=200)
    # Release the figure so repeated calls don't accumulate memory.
    plt.cla()
    plt.close('all')


def main():
    data_types = ['corr-CBF', "ATT"]
    # TODO (original exercise note): add the 'AnImage_AAL3' atlas and fix the
    # error that appears.
    atlas_list = ['AnImage_WholeBrain', 'AnImage_BrainLobes']
    output_dir = r'..\Data\box-graph'
    os.makedirs(output_dir, exist_ok=True)

    input_file = r'..\Data\csv-group.xlsx'
    # data_only=True reads cached formula values instead of the formulas.
    workbook = openpyxl.load_workbook(input_file, data_only=True)
    groups = ['A', 'B', 'C']

    for data_type in data_types:
        for atlas in atlas_list:
            value_dict = {}
            for group in groups:
                worksheet = workbook[f'{group}_{atlas}_{data_type}']
                for column_num in range(2, worksheet.max_column + 1):
                    data = []
                    name = worksheet.cell(row=1, column=column_num).value
                    # Rows 2..max_row-11 are patient data; the last 11 rows
                    # are the statistics block written by the group script.
                    for row in range(2, worksheet.max_row - 10):
                        data.append(worksheet.cell(row=row, column=column_num).value)

                    if name not in value_dict:
                        value_dict[name] = []
                    value_dict[name].append(data)

            for region, data in value_dict.items():
                draw_box_graph(region, data, groups, data_type, os.path.join(output_dir, f'{region}{data_type}.png'))


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
import openpyxl
import os.path
import matplotlib.pyplot as plt


def draw_box_graph(data_dict, groups, data_type, save_path):
    """Draw a 2x4 grid of per-region box plots (one box per group) and save it.

    data_dict maps region name -> list of value lists (one per group).
    Regions beyond the 8 grid cells are not drawn; if there are fewer than 8
    regions the surplus cells stay empty.
    """
    plt.rcParams["font.sans-serif"] = ["SimHei"]  # font that can render Chinese labels

    grid_height = 2
    grid_width = 4

    _, ax = plt.subplots(grid_height, grid_width, figsize=(20, 15), sharey=True)
    keys = list(data_dict.keys())
    for x in range(grid_width):
        for y in range(grid_height):
            idx = x + y * grid_width
            # Guard against atlases with fewer regions than grid cells
            # (original code raised IndexError here).
            if idx >= len(keys):
                continue
            region_name = keys[idx]
            ax[y][x].boxplot(data_dict[region_name], patch_artist=True, labels=groups)
            ax[y][x].set_title(f"{region_name}{data_type}对比")
            ax[y][x].set_ylabel(data_type)

    plt.tight_layout()
    plt.savefig(save_path, dpi=200)
    # Release the figure so repeated calls don't accumulate memory.
    plt.cla()
    plt.close('all')


def main():
    data_types = ['corr-CBF', "ATT"]
    atlas_list = ['AnImage_BrainLobes']
    output_dir = r'..\Data\subplot'
    os.makedirs(output_dir, exist_ok=True)

    input_file = r'..\Data\csv-group.xlsx'
    # data_only=True reads cached formula values instead of the formulas.
    workbook = openpyxl.load_workbook(input_file, data_only=True)
    groups = ['A', 'B', 'C']

    for data_type in data_types:
        for atlas in atlas_list:
            value_dict = {}
            for group in groups:
                worksheet = workbook[f'{group}_{atlas}_{data_type}']
                for column_num in range(2, worksheet.max_column + 1):
                    data = []
                    name = worksheet.cell(row=1, column=column_num).value
                    # Rows 2..max_row-11 are patient data; the last 11 rows
                    # are the statistics block written by the group script.
                    for row in range(2, worksheet.max_row - 10):
                        data.append(worksheet.cell(row=row, column=column_num).value)

                    if name not in value_dict:
                        value_dict[name] = []
                    value_dict[name].append(data)

            draw_box_graph(value_dict, groups, data_type, os.path.join(output_dir, f'{atlas}_{data_type}.png'))


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
import numpy as np
|
||||
import openpyxl
|
||||
import os.path
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def draw_cbf_graph(cbf_mean_dict, data_type, save_path):
    """Draw a polar (circular) bar chart of per-region mean values.

    cbf_mean_dict maps region name -> mean value; one bar per region,
    starting at north and running clockwise.  The figure is written to
    save_path at 200 dpi.
    """
    plt.rcParams["figure.figsize"] = [32, 18]
    plt.rcParams["figure.autolayout"] = True
    plt.rcParams["font.sans-serif"] = ["SimHei"]  # CJK-capable font
    subplot = plt.subplot(1, 1, 1, projection='polar')

    count = len(cbf_mean_dict)
    # Evenly spaced bar angles; endpoint=False avoids duplicating 0/2*pi.
    theta = np.linspace(0.0, 2 * np.pi, count, endpoint=False)
    data = np.array(list(cbf_mean_dict.values()))
    # 1.8/2.0 of the available slot width leaves a small gap between bars.
    width = 1.8 * np.pi / count
    subplot.set_theta_zero_location('N')
    subplot.set_theta_direction(-1)  # clockwise
    subplot.xaxis.set_ticks(theta)
    subplot.yaxis.set_ticks([])  # hide radial ticks

    bars = subplot.bar(x=theta, height=data, width=width)
    for data_value, bar in zip(data, bars):
        # Colour by value; /60 assumes values roughly in 0-60 (CBF range)
        # -- TODO confirm; reviewer note asks for a 40=red..60=blue scale.
        bar.set_facecolor(plt.cm.turbo(data_value / 60))
        bar.set_alpha(0.8)
    for theta_value, data_value in zip(theta, data):
        # Place the numeric label slightly beyond the bar tip.
        margin = data.max() * 0.08
        subplot.annotate(str(round(data_value, 2)), xy=(theta_value, data_value + margin))

    # Set the tick labels only to borrow their positions/transforms, then
    # redraw each as a rotated text and clear the originals below.
    subplot.set_xticklabels(list(cbf_mean_dict.keys()))
    for label, angle in zip(subplot.get_xticklabels(), theta):
        x, y = label.get_position()
        lab = subplot.text(x, y - 0.1, label.get_text(), transform=label.get_transform(), ha=label.get_ha(),
                           va=label.get_va())
        # Rotate each label tangentially; flip by 180 when it would be
        # upside-down on the left half of the circle.
        rotation = - np.rad2deg(angle) + 90
        if rotation < -90:
            rotation += 180
        lab.set_rotation(rotation)
    subplot.set_xticklabels([])  # remove the unrotated originals

    plt.title(f"221例健康人西门子5延迟数据各脑区{data_type}")
    plt.savefig(save_path, dpi=200)
    plt.cla()
    plt.close('all')
|
||||
|
||||
|
||||
def main():
    """Build one polar bar chart of per-region mean values for each
    (atlas, data_type) pair found in the statistics workbook.
    """
    # TODO (original reviewer note, translated): add ATT support and
    # rescale the colour map so CBF=40 is red and CBF=60 is blue.
    data_types = ['corr-CBF']
    atlas_list = ['AnImage_AAL3']
    output_dir = r'..\Data\polar-graph'
    os.makedirs(output_dir, exist_ok=True)

    input_file = r'..\Data\csv-simple-stat.xlsx'
    workbook = openpyxl.load_workbook(input_file, data_only=True)

    for data_type in data_types:
        for atlas in atlas_list:
            cbf_mean_dict = {}
            worksheet = workbook[f'{atlas}_{data_type}']
            for column_num in range(2, worksheet.max_column + 1):
                name = worksheet.cell(row=1, column=column_num).value
                # Row (max_row - 8) holds the precomputed mean -- presumably
                # within trailing statistics rows; TODO confirm layout.
                mean = worksheet.cell(row=worksheet.max_row - 8, column=column_num).value
                print(atlas, data_type, name, mean)
                # isinstance is the idiomatic type check; it also skips
                # None/str cells exactly as `type(mean) is float` did.
                if isinstance(mean, float):
                    cbf_mean_dict[name] = mean

            draw_cbf_graph(cbf_mean_dict, data_type, os.path.join(output_dir, f'{atlas}_{data_type}.png'))
|
||||
|
||||
|
||||
# Standard entry-point guard: importing this module must not trigger the
# full processing run as a side effect.
if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
|
||||
# 计算 12879 * 36521 并打印
|
||||
# product = ???
|
||||
# print(product)
|
||||
|
||||
|
||||
# 计算 100 模 32 并打印
|
||||
# mod = ???
|
||||
# print(mod)
|
||||
|
||||
# 打印Hello\ "World!"
|
||||
# msg = ???
|
||||
# print(msg)
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
import math

# Exercise: print the circumference and area of a circle, 3 decimal places.
radius = 3
pi = math.pi

# Circumference = 2*pi*r; area = pi*r^2.
circumference = 2 * pi * radius
area = pi * radius ** 2

# Expected output: 当圆的的半径是3时,它的周长是18.850,他的面积是28.274
print(f'当圆的的半径是{radius}时,它的周长是{circumference:.3f},他的面积是{area:.3f}')
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
# Exercise: adjust the list so the final print matches the expected output.

list_a = [0]

# Five copies of 'Hello', then three repetitions of the pair 4, 5.
list_a.extend(['Hello'] * 5)
list_a.extend([4, 5] * 3)

# Expected: [0, 'Hello', 'Hello', 'Hello', 'Hello', 'Hello', 4, 5, 4, 5, 4, 5]
print(list_a)
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
|
||||
# Patient records stored as [name, age, sex].
patient_info = [
    ["Zhang San", 52, "M"],
    ["Li Si", 19, "F"],
    ["Wang Wu", 47, "F"],
]

# Fix-up: Zhang San's and Li Si's ages were entered swapped -- exchange
# them with tuple unpacking (no temporary needed).
patient_info[0][1], patient_info[1][1] = patient_info[1][1], patient_info[0][1]

# Should print 19
print(patient_info[0][1])

# Should print 52
print(patient_info[1][1])
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
# Name -> age mapping for the exercise.
age_dict = {
    "Zhang San": 52,
    "Li Si": 19,
    "Wang Wu": 47
}

# Fix-up: Zhang San's and Li Si's ages were entered swapped -- exchange
# them, then add the missing Zhao Liu (age 29).  Without the addition the
# final print raised KeyError.
age_dict["Zhang San"], age_dict["Li Si"] = age_dict["Li Si"], age_dict["Zhang San"]
age_dict["Zhao Liu"] = 29

# Should print 19
print(age_dict['Zhang San'])

# Should print 52
print(age_dict['Li Si'])

# Should print 29
print(age_dict['Zhao Liu'])
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# 请根据用户输入的年龄输出在学校的阶段:
|
||||
# 0-5 输出 学龄前
|
||||
# 6-11 输出 小学
|
||||
# 12-14 输出 初中
|
||||
# 15-17 输出 高中
|
||||
# 18-21 输出 大学
|
||||
# 22-59 输出 打工人
|
||||
# 60+ 输出 退休了
|
||||
|
||||
print('请输入你的年龄:')
|
||||
age = int(input())
|
||||
if age < 6:
|
||||
print('学龄前')
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
|
||||
|
||||
# Partition patients into 4 groups by sex and age, storing names only.
patient_info = [
    ["Zhang San", 52, "M"],
    ["Li Si", 19, "F"],
    ["Wang Wu", 47, "F"],
    ["Zhao Liu", 29, "M"],
]
group_M_under_30 = []
group_M_over_30 = []
group_F_under_30 = []
group_F_over_30 = []

for patient in patient_info:
    print('new patient:', patient)
    # Unpack the record; "under 30" means age < 30 (29 is under, 30+ over),
    # consistent with the expected outputs below.
    name, age, sex = patient
    if sex == "M":
        (group_M_under_30 if age < 30 else group_M_over_30).append(name)
    else:
        (group_F_under_30 if age < 30 else group_F_over_30).append(name)

# Expected: ["Zhao Liu"]
print(group_M_under_30)

# Expected: ["Zhang San"]
print(group_M_over_30)

# Expected: ["Li Si"]
print(group_F_under_30)

# Expected: ["Wang Wu"]
print(group_F_over_30)
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
# Given a list of integers, compute their sum and print the full equation.

magic_number_list = [4, 8, 15, 16, 23, 42]

# 'total' avoids shadowing the builtin sum(); join builds the left-hand
# side in one pass instead of string concatenation in a loop.
total = sum(magic_number_list)
expression = ' + '.join(str(n) for n in magic_number_list) + f' = {total}'

# Expected output: 4 + 8 + 15 + 16 + 23 + 42 = 108
print(expression)
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
import random
|
||||
|
||||
# 我们来写一个猜数字的小游戏
|
||||
|
||||
# 生成一个随机的1-100整数作为答案
|
||||
answer = random.randint(1, 100)
|
||||
game_over = False
|
||||
count = 0
|
||||
while not game_over:
|
||||
print(f"你已经猜了{count}次,请输入下一次猜测")
|
||||
count += 1
|
||||
guess = int(input())
|
||||
print(f"你的猜测是{guess}")
|
||||
# 如果猜对了,输出“猜对了”并结束游戏。否则输出“猜大了”或者“猜小了”
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
# Print the prime numbers below 100.

def is_prime(n):
    """Return True iff *n* is prime."""
    if n < 2:
        return False
    # Trial division: any composite n has a divisor <= sqrt(n).
    for divisor in range(2, int(n ** 0.5) + 1):
        if n % divisor == 0:
            return False
    return True


if __name__ == '__main__':
    for i in range(100):
        if is_prime(i):
            print(i)
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
import random


# Probability theory says that among 23 people, the chance that at least
# two share a birthday is ~50.7%.  Estimate it by Monte Carlo simulation.

def contains_same_birthday(birthdays):
    """Return True iff *birthdays* contains a duplicated value."""
    # A set drops duplicates, so a size mismatch means a collision.
    return len(set(birthdays)) < len(birthdays)


if __name__ == '__main__':
    # Run 10000 trials of 23 random birthdays each.
    test_count = 10000
    # Number of trials with at least one shared birthday.
    true_count = 0

    for _ in range(test_count):
        birth_days = [random.randint(1, 365) for _ in range(23)]
        if contains_same_birthday(birth_days):
            true_count += 1

    # Report the estimated probability to 4 decimal places.
    print('23个人中有至少两人生日相同的概率是:')
    print(f'{true_count / test_count:.4f}')
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
|
||||
|
||||
# 哥德巴赫猜想(Goldbach's conjecture)是数论中存在最久的未解问题之一。这个猜想最早出现在1742年普鲁士数学家克里斯蒂安·哥德巴赫与瑞士数学家莱昂哈德·欧拉的通信中。用现代的数学语言,哥德巴赫猜想可以陈述为:
|
||||
# 任一大于2的偶数,都可表示成两个素数之和
|
||||
# 验证1000以内偶数是否满足这一猜想,并输出每一个结果,例如
|
||||
# 4 = 2 + 2
|
||||
# 6 = 3 + 3
|
||||
# 8 = 3 + 5
|
||||
|
|
@ -1,3 +1,2 @@
|
|||
# ASL成像基于Python的定量数据处理基础
|
||||
|
||||
# python_for_asl
|
||||
|
||||
|
|
|
|||
|
|
@ -1,32 +0,0 @@
|
|||
attr==0.3.2
|
||||
ConfigParser==7.2.0
|
||||
contextlib2==21.6.0
|
||||
cryptography==45.0.5
|
||||
Cython==3.1.2
|
||||
dl==0.1.0
|
||||
docutils==0.22
|
||||
HTMLParser==0.0.2
|
||||
ipython==7.34.0
|
||||
ipywidgets==8.1.7
|
||||
Jinja2==3.0.3
|
||||
jnius==1.1.0
|
||||
keyring==25.6.0
|
||||
lockfile==0.12.2
|
||||
matplotlib==3.3.4
|
||||
mock==5.2.0
|
||||
numpy==1.21.6
|
||||
openpyxl==3.1.3
|
||||
Pillow==9.5.0
|
||||
protobuf==3.14.0
|
||||
pyOpenSSL==25.1.0
|
||||
pywin32==308
|
||||
railroad==0.5.0
|
||||
scipy==1.5.4
|
||||
Sphinx==8.2.3
|
||||
thread==2.0.5
|
||||
tornado==6.2
|
||||
trove_classifiers==2025.5.9.12
|
||||
truststore==0.10.3
|
||||
urllib3_secure_extra==0.1.0
|
||||
xmlrpclib==1.0.1
|
||||
Loading…
Add table
Add a link
Reference in a new issue