import os
import re
import sys

sys.path.append("/home/cv/workspace/tujintao/document_segmentation")
from Utils.main_clear.sci_clear import non_data_latex_iter

filename = "Data/samples"


def read_data(directory):
    all_documents = []
    all_labels = []
    for filename in os.listdir(directory)[:2]:
        if filename.endswith(".txt"):
            filepath = os.path.join(directory, filename)
            # print(filepath)
            # Read the txt file content and handle the ending of each line.
            with open(filepath, "r", encoding="utf-8") as file:
                lines = file.readlines()
            # for i in range(len(lines)):
            #     if not lines[i].endswith("------------------------1\n"):
            #         lines[i] = re.sub(r'------------------------1$', '------------------------1\n', lines[i])
            # Join all lines into a single string and strip irrelevant markup.
            # NOTE: the angle-bracket tag patterns below are assumptions; adjust
            # them to the markup actually used in the Data/samples export.
            text = "".join(lines)
            text = re.sub(r'</?(?:table|tr|td|sub|sup)>\s*', '', text)   # table/typography tags -> removed
            text = re.sub(r'</?p>\s*', ' ', text)                        # paragraph tags -> single space
            text = re.sub("【公式latex提取失败】", "【公式】", text)        # failed LaTeX extraction -> formula placeholder
            text = re.sub(r'<img[^>]*>', "【图片】", text)                 # embedded images -> image placeholder
            text = re.sub(r'<latex>(.*?)</latex>', r"\1", text)          # keep formula bodies, drop the wrapper tag
            text = non_data_latex_iter(text)
            text = re.sub(r'<br\s*/?>', '\n', text)                      # explicit line breaks -> real newlines
            # Make sure every segment marker is followed by a newline, so that it
            # starts (or forms) its own line after splitting.
            text = re.sub(r'(------------------------1)(?!\n)', r'\1\n', text)
            # print(text)
            # Extract labels and filter out empty sentences.
            labels = []
            sentences = []
            for sentence in text.split("\n"):
                # print(sentence)
                if re.search("------------------------1", sentence.strip()):
                    if sentence.strip().startswith("------------------------1"):
                        if labels:
                            labels[-1] = 1  # mark the previous non-empty sentence as a segment end
                        else:
                            labels.append(1)
                    sentence = re.sub("------------------------1", "", sentence)
                else:
                    if sentence.strip():
                        labels.append(0)
                if sentence.strip():
                    sentences.append(sentence.strip())
            print("Number of sentences:", len(sentences))
            all_documents.append(sentences)
            all_labels.append(labels)
    return all_documents, all_labels
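

def _demo_read_data():
    # Hedged usage sketch (not part of the original pipeline): write one tiny sample
    # file and run read_data on it. A line consisting only of the
    # "------------------------1" marker labels the previous sentence with 1.
    import tempfile
    sample = "第一句。\n第二句。\n------------------------1\n第三句。\n"
    with tempfile.TemporaryDirectory() as tmp_dir:
        with open(os.path.join(tmp_dir, "demo.txt"), "w", encoding="utf-8") as f:
            f.write(sample)
        docs, labels = read_data(tmp_dir)
    # Expected (assuming non_data_latex_iter leaves plain text unchanged):
    # docs[0]   == ["第一句。", "第二句。", "第三句。"]
    # labels[0] == [0, 1, 0]
    print(docs[0], labels[0])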


def split_dataset(input_texts, segment_labels, train_ratio=0.7, valid_ratio=0.1):
    """Split the data into train/valid/test sets."""
    total_samples = len(input_texts)
    train_size = int(total_samples * train_ratio)
    valid_size = int(total_samples * valid_ratio)
    test_size = total_samples - train_size - valid_size

    train_doc = input_texts[:train_size]
    train_seg_labels = segment_labels[:train_size]
    valid_doc = input_texts[train_size:train_size + valid_size]
    valid_seg_labels = segment_labels[train_size:train_size + valid_size]
    test_doc = input_texts[-test_size:]
    test_seg_labels = segment_labels[-test_size:]
    return (train_doc, train_seg_labels), (valid_doc, valid_seg_labels), (test_doc, test_seg_labels)


def get_token(sentences):
    all_tokens = tokenizer.encode("\n".join(sentences))
    bef_sent_tokens = []
    aft_sent_tokens = all_tokens[1:-1]  # tokens of the current sentence and everything after it
    sents_token_range = []
    sent_idx = []  # "start_idx,local_sent_len": where the current sentence sits inside its window
    tokens_per_sent = []
    for idx, one_sent in enumerate(sentences):
        token_id = tokenizer.encode(one_sent)
        print(token_id)
        tokens_per_sent.append(token_id[1:-1])
        local_sent_len = len(token_id[1:-1])
        if not idx:
            sents_token_range.append(aft_sent_tokens[0:510])
            sent_idx.append("{},{}".format(0, local_sent_len))
        else:
            if len(token_id[1:-1]) > 200:
                # Current sentence is longer than 200 tokens: truncate the context
                # and keep only 150 tokens of left context.
                bef_length = 150
            elif idx == len(sentences) - 1:
                # Last sentence: keep 200 tokens of left context.
                bef_length = 200
            else:
                bef_length = int((510 - len(token_id[1:-1])) * 0.4)
            # If bef_sent_tokens holds fewer than bef_length tokens, the slice is
            # shorter and the right context grows accordingly.
            aft_length = 510 - len(bef_sent_tokens[-bef_length:])
            sents_token_range.append(bef_sent_tokens[-bef_length:] + aft_sent_tokens[:aft_length])
            sent_idx.append("{},{}".format(len(bef_sent_tokens[-bef_length:]), local_sent_len))
        aft_sent_tokens = aft_sent_tokens[local_sent_len:]
        bef_sent_tokens.extend(token_id[1:-1])
    return tokens_per_sent, sents_token_range, sent_idx
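

def _demo_split_dataset():
    # Hedged sketch: with 10 documents and the default 0.7/0.1 ratios the split is
    # 7 train / 1 valid / 2 test documents (the test set takes whatever remains).
    docs = [["sent"] for _ in range(10)]
    labels = [[0] for _ in range(10)]
    (train_doc, _), (valid_doc, _), (test_doc, _) = split_dataset(docs, labels)
    print(len(train_doc), len(valid_doc), len(test_doc))  # -> 7 1 2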

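
def _demo_get_token():
    # Hedged usage sketch: get_token relies on a module-level `tokenizer`, which the
    # __main__ block below only creates in commented-out code. The checkpoint path is
    # the one used there; adjust it if the model lives elsewhere.
    global tokenizer
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained("Models/bert-base-chinese", do_lower_case=True)
    sentences = ["第一句。", "第二句。", "第三句。"]
    tokens_per_sent, sents_token_range, sent_idx = get_token(sentences)
    for i, window in enumerate(sents_token_range):
        start, length = (int(x) for x in sent_idx[i].split(","))
        # Each window holds up to 510 context token ids; the slice below is the
        # current sentence inside that window.
        print(tokenizer.decode(window[start:start + length]))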

if __name__ == '__main__':
    text = r"""
物理量12345678
纸质h(m)0.12260.15100.18200.21530.25170.29000.33160.3753
$\frac{{v}^{2}}{2}$(m2·s-21.101【公式】291.521.742.002.27
木质h(m)0.06050.08250.10900【公式】14000.17400.21150.25300.2980
$\frac{{v}^{2}}{2}$(m2·s-20.7351.031.321.601.952.34
铁质h(m)0.09530.12440.15740.19440.23520.27990.32850.3810
$\frac{{v}^{2}}{2}$(m2·s-21.211.531.892.282.723.19
""" # aa = [1,2,3,6,3,3,7,5,4,4,8,8] # print(aa[-60:]) # print(aa[:60]) # print(aa[-3:3]) # print(int(470*0.4)) from Utils.train_configs import data_dir # from transformers import BertTokenizer # tokenizer = BertTokenizer.from_pretrained('Models/bert-base-chinese', use_fast=True, do_lower_case=True) all_documents, all_labels = read_data(data_dir) # tokens_per_sent, sents_token_range, sent_idx = get_token(all_documents[0]) # for i in range(len(tokens_per_sent)): # print("参考句:", all_documents[0][i]) # print("上下句范围:", tokenizer.decode(sents_token_range[i])) # st, lenght = sent_idx[i].split(",") # print("本句:", tokenizer.decode(sents_token_range[i][int(st): int(st)+int(lenght)])) # print("****************************************") import torch # last_hidden_states = torch.randn(3, 10, 20) # seq_idxs = [[1, 3], [2, 5], [3, 7]] # # last_hidden_states = map(lambda x: x[0], seq_idxs) # padded_sliced_hidden_states = [last_hidden_states[i, seq_idxs[i][0]:seq_idxs[i][1], :] # for i in range(last_hidden_states.size(0))] # for i in range(last_hidden_states.size(0)): # aa = last_hidden_states[i, seq_idxs[i][0]:seq_idxs[i][0]+seq_idxs[i][1], :] # print(aa.size()) # bb = aa.mean(dim=0) # print(bb) # concatenated_tensor = torch.stack([last_hidden_states[i, seq_idxs[i][0]:seq_idxs[i][0]+seq_idxs[i][1], :].mean(dim=0) for i in range(last_hidden_states.size(0))], dim=0) # print(concatenated_tensor) # for i, j in enumerate(range(2)): # print(i, j) tensor = torch.randn(2, 3) tensor_expanded = tensor.unsqueeze(0) print(tensor_expanded.shape) for i in range(0): print(11111111111111111111111111) # 假设你有两个张量 # tensor1 = torch.randn(300, 512, 768) # 第一个张量,大小为 [300, 512, 768] # tensor2 = torch.randn(22, 512, 768) # 第二个张量,大小为 [22, 512, 768] # # 使用 torch.cat 在第一维度上连接这两个张量 # combined_tensor = torch.cat((tensor1, tensor2), dim=0) # print(combined_tensor.shape) # 输出应该是 torch.Size([522, 512, 768])