import torch
import json


class dtypeEncoder(json.JSONEncoder):
    """JSON encoder that renders torch.dtype values as strings (np/jnp dtypes could be added the same way)."""

    def default(self, obj):
        if isinstance(obj, torch.dtype):
            return str(obj)
        return json.JSONEncoder.default(self, obj)

# d = {"torch_dtype": torch.float16}
# json.dumps(d)                    # fails: TypeError: Object of type dtype is not JSON serializable
# json.dumps(d, cls=dtypeEncoder)  # works: {"torch_dtype": "torch.float16"}
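
# Hedged round-trip sketch (not part of the original snippet): encoding uses dtypeEncoder above;
# decoding the "torch.float16" string back into a dtype via getattr is an assumption about how
# the serialized value will be consumed.
def _dtype_roundtrip_example():
    payload = json.dumps({"torch_dtype": torch.float16}, cls=dtypeEncoder)  # '{"torch_dtype": "torch.float16"}'
    name = json.loads(payload)["torch_dtype"]                               # "torch.float16"
    restored = getattr(torch, name.split(".")[-1])                          # back to torch.float16
    assert restored is torch.float16
    return restored
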
import sys
import re

sys.path.append(r'/home/cv/workspace/tujintao/document_segmentation')

# from Utils.read_data import read_data
from tqdm import tqdm
import numpy as np
from pprint import pprint
from torch.utils.data import DataLoader, Dataset
from concurrent.futures import ThreadPoolExecutor

class ListDataset(Dataset):
    def __init__(self, file_path=None, data=None, tokenizer=None, max_len=None, label_list=None, **kwargs):
        self.kwargs = kwargs
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.label_list = label_list
        if isinstance(file_path, (str, list)):
            self.data = self.load_data(file_path, tokenizer, max_len, label_list)
        elif isinstance(data, list):
            self.data = data
        elif isinstance(data, tuple):
            # Large dataset: tokenize the (doc, labels) pairs concurrently.
            all_data, all_callback_info = [], []
            executor = ThreadPoolExecutor(max_workers=20)  # even a couple of workers gives a modest speedup
            for res in executor.map(self.format_data, zip(data[0], data[1])):
                all_data.append(res[0])
                all_callback_info.append(res[1])
            self.data = (all_data, all_callback_info)
            # Single-process alternative:
            # self.data = format_data(data, label_list, tokenizer, max_len)
        else:
            raise ValueError('The input must be a str/list file_path or a list/tuple dataset')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    def format_data(self, doc_seg_labels):
        """
        doc_seg_labels: a (doc_list: list, seg_labels: list) pair.
        Re-formats data that has already been split into train/valid/test sets into the required format.
        """
        one_d, d_labels = doc_seg_labels[0], doc_seg_labels[1]
        inputs = self.tokenizer(one_d, padding='max_length', truncation=True,
                                max_length=self.max_len, return_tensors='pt')
        label = []
        label_dict = {x: [] for x in self.label_list}
        for lab in d_labels:
            label.append([lab[0], lab[1], "TOPIC"])
            label_dict.setdefault("TOPIC", []).append((one_d[lab[0]:lab[1]], lab[0]))
        # label is [[start, end, entity], ...]; one_d[start:end] is one topic item
        return (inputs, label), (one_d, label_dict)
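

# Illustrative sketch (never called by the pipeline): shows the (doc_list, seg_labels) input that
# ListDataset.format_data expects and the [[start, end, "TOPIC"], ...] / label_dict shapes it returns
# alongside the tokenizer output. The toy sentences are invented, and the tokenizer is assumed to be
# any HuggingFace BERT-style tokenizer supplied by the caller.
def _format_data_contract_example(tok):
    doc = ["1. What is 1 + 1?", "A. 1", "B. 2", "2. Name a prime number."]
    seg_labels = [(0, 3), (3, 4)]  # each (start, end) pair marks one question/topic span over `doc`
    ds = ListDataset(data=[], tokenizer=tok, max_len=32, label_list=["TOPIC"])
    (inputs, label), (one_d, label_dict) = ds.format_data((doc, seg_labels))
    assert label == [[0, 3, "TOPIC"], [3, 4, "TOPIC"]]
    assert label_dict["TOPIC"][0] == (doc[0:3], 0)  # sentences of the first topic plus its start index
    return inputs, label, label_dict
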
# Dataset for entity (question item) recognition
class NerDataset(ListDataset):
    @staticmethod
    def load_data(filename, tokenizer, max_len, label_list):
        # Named load_data so that ListDataset.__init__ can call it when a file_path is passed.
        data = []
        callback_info = []  # used later for computing evaluation metrics
        with open(filename, encoding='utf-8') as f:
            f = json.loads(f.read())
        for d in f:
            text = d['text']
            if len(text) == 0:
                continue
            labels = d['labels']
            tokens = [i for i in text]
            if len(tokens) > max_len - 2:
                tokens = tokens[:max_len - 2]
                text = text[:max_len]
            tokens = ['[CLS]'] + tokens + ['[SEP]']
            token_ids = tokenizer.convert_tokens_to_ids(tokens)
            label = []
            label_dict = {x: [] for x in label_list}
            for lab in labels:
                # Shift the start by 1 for [CLS]; lab[3] is already one past the entity end, so it needs no shift.
                label.append([lab[2] + 1, lab[3], lab[1]])
                label_dict.setdefault(lab[1], []).append((text[lab[2]:lab[3]], lab[2]))
            data.append((token_ids, label))  # label is [[start, end, entity], ...]
            callback_info.append((text, label_dict))
        return data, callback_info
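

# Inferred from the parsing above (an assumption, since no sample file is shown here): each record
# in the JSON file consumed by NerDataset.load_data looks roughly like the dict below. lab[1] is the
# entity type, lab[2]/lab[3] are character start/end offsets (end exclusive); lab[0] appears unused
# (possibly an id). "TOPIC" is a stand-in entity type.
_EXAMPLE_NER_RECORD = {
    "text": "What is 2 + 2?",
    "labels": [
        ["T0", "TOPIC", 0, 14],  # text[0:14] == "What is 2 + 2?"
    ],
}
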
from transformers import BertTokenizer

model_dir = r'/home/cv/workspace/tujintao/PointerNet_Chinese_Information_Extraction/UIE/model_hub/chinese-bert-wwm-ext/'
tokenizer = BertTokenizer.from_pretrained(model_dir)


def format_data(doc_seg_labels, max_seq_len=240):
    """
    doc_seg_labels: a (doc_list, seg_labels) pair.
    Re-formats data that has already been split into train/valid/test sets into the required format.
    """
    one_d, d_labels = doc_seg_labels
    # Handle documents that contain empty sentences: mark each segment boundary with a sentinel,
    # drop the empty sentences, then rebuild the (start, end) labels from the sentinels so the
    # indices match the filtered sentence list.
    if any(not sent.strip() for sent in one_d):
        sentences, new_labels = [], []
        for lab in d_labels:
            if not one_d[lab[0]].strip():
                one_d[lab[0] + 1] = "【start:1】" + one_d[lab[0] + 1]
            else:
                one_d[lab[0]] = "【start:1】" + one_d[lab[0]]
            if not one_d[lab[1] - 1].strip():
                one_d[lab[1] - 2] += "【end:1】"
            else:
                one_d[lab[1] - 1] += "【end:1】"
        all_sents = [sent for sent in one_d if sent.replace("【start:1】", "").replace("【end:1】", "").strip()]
        st = 0
        for n, sentence in enumerate(all_sents):
            if sentence.startswith("【start:1】"):
                sentence = sentence.replace("【start:1】", "")
                st = n
            if sentence.endswith("【end:1】"):
                sentence = sentence.replace("【end:1】", "")
                new_labels.append((st, n + 1))
            sentences.append(sentence)
        one_d = sentences
        d_labels = new_labels
    # ----------- Sanity check for malformed labels (disabled) -----------
    # lst = 0
    # for dd in d_labels:
    #     if dd[1] < dd[0] or dd[0] < lst:
    #         print("bad label:", d_labels)
    #     lst = dd[1]

    inputs = tokenizer(one_d, padding='max_length', truncation=True,
                       max_length=max_seq_len, return_tensors='pt')
    label = []
    label_dict = {"TOPIC": []}
    for lab in d_labels:
        label.append([lab[0], lab[1], "TOPIC"])
        label_dict["TOPIC"].append((one_d[lab[0]:lab[1]], lab[0]))
    # label is [[start, end, entity], ...]; one_d[start:end] is one topic item
    return (inputs, label), (one_d, label_dict)
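

# A minimal worked example of the empty-sentence branch above (the toy document is an assumption;
# it relies on the module-level `tokenizer` already loaded from model_dir). Sentence index 2 is
# blank, so after filtering the second segment's indices shrink from (2, 5) to (2, 4).
def _format_data_empty_sentence_demo():
    doc = ["1. First question", "A. option", "   ", "2. Second question", "A. option"]
    seg_labels = [(0, 2), (2, 5)]  # the second segment starts on the blank sentence
    (inputs, label), (one_d, label_dict) = format_data((list(doc), list(seg_labels)), max_seq_len=32)
    assert one_d == ["1. First question", "A. option", "2. Second question", "A. option"]
    assert label == [[0, 2, "TOPIC"], [2, 4, "TOPIC"]]
    return inputs, label, label_dict
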


def load_and_split_dataset(filename, train_ratio=0.7, valid_ratio=0.1):
    # ----------- Full-scale sample set -----------------------------------
    with open(filename, "r", encoding="utf-8") as f1:
        sample5w = json.load(f1)
        input_texts = sample5w["input_txts"]
        segment_labels = sample5w["segment_labels"]
    # Split the data into train/valid/test sets.
    total_samples = len(input_texts)
    train_size = int(total_samples * train_ratio)
    valid_size = int(total_samples * valid_ratio)
    test_size = total_samples - train_size - valid_size
    train_doc = input_texts[:train_size]
    train_seg_labels = segment_labels[:train_size]
    valid_doc = input_texts[train_size:train_size + valid_size]
    valid_seg_labels = segment_labels[train_size:train_size + valid_size]
    test_doc = input_texts[-test_size:]
    test_seg_labels = segment_labels[-test_size:]
    return (train_doc, train_seg_labels), (valid_doc, valid_seg_labels), (test_doc, test_seg_labels)
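

# Small usage sketch (the synthetic ten-document file and the tempfile path are assumptions made
# for illustration): writes a JSON file in the expected {"input_txts": ..., "segment_labels": ...}
# layout and checks the 7/1/2 split produced by the default 0.7/0.1 ratios.
def _split_dataset_demo():
    import tempfile
    docs = [[f"doc {i} sentence"] for i in range(10)]
    segs = [[(0, 1)] for _ in range(10)]
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False, encoding="utf-8") as tmp:
        json.dump({"input_txts": docs, "segment_labels": segs}, tmp, ensure_ascii=False)
        tmp_path = tmp.name
    train, valid, test = load_and_split_dataset(tmp_path, train_ratio=0.7, valid_ratio=0.1)
    assert (len(train[0]), len(valid[0]), len(test[0])) == (7, 1, 2)
    return train, valid, test
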

def main():
    path0 = "/home/cv/workspace/tujintao/document_segmentation/Data/samples/train_data.jsonl"
    train_dataset = load_dataset("json", data_files=path0)["train"][:]

    all_data, all_callback_info = [], []
    executor = ThreadPoolExecutor(max_workers=20)  # even a couple of workers gives a modest speedup
    for res in executor.map(format_data, zip(train_dataset["input_txt"], train_dataset["segment_label"])):
        inputs, label = res[0]
        # Tensors are not JSON serializable, so convert the tokenizer output to plain lists before dumping.
        all_data.append(({k: v.tolist() for k, v in inputs.items()}, label))
        all_callback_info.append(res[1])
    # Save the tokenized inputs together with the callback info used for evaluation.
    with open("/home/cv/workspace/tujintao/document_segmentation/Data/samples/token_datasets_6w.json", "w", encoding="utf-8") as f1:
        json.dump({"input": all_data, "callback_info": all_callback_info}, f1, ensure_ascii=False)


if __name__ == "__main__":
    import datasets
    from datasets import load_dataset

    main()

    # Alternative pipeline: tokenize with datasets.map and save to disk
    # (the dataset saved this way turned out to be too large).
    # path = "/home/cv/workspace/tujintao/document_segmentation/Data/samples/train_data.json"
    # train_data, valid_data, test_data = load_and_split_dataset(path, train_ratio=0.995, valid_ratio=0.003)
    # train_dataset = train_dataset.map(
    #     format_data,
    #     keep_in_memory=True,
    #     remove_columns=list(train_dataset.features),
    #     batched=True,
    #     batch_size=1,
    #     num_proc=2,
    #     desc="Running tokenizer on dataset"
    # )
    # train_dataset.save_to_disk("/home/cv/workspace/tujintao/document_segmentation/Data/samples/dataset_6w")
    # Loading the saved dataset back:
    # dataset = datasets.load_from_disk("/home/cv/workspace/tujintao/document_segmentation/Data/samples/dataset_6w")
    # a = dataset[0]['input_ids']['attention_mask']
    # print(len(a))
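
# Hedged consumption sketch (not part of the original script): assumes main() has written
# token_datasets_6w.json with the {"input": [(inputs_as_lists, label), ...], "callback_info": [...]}
# layout used above, i.e. that the tokenizer tensors were converted to plain lists before dumping.
# Rebuilding torch tensors from the first record is just one way the saved file could be consumed.
def _load_saved_tokenized(path="/home/cv/workspace/tujintao/document_segmentation/Data/samples/token_datasets_6w.json"):
    with open(path, "r", encoding="utf-8") as f:
        saved = json.load(f)
    first_inputs, first_label = saved["input"][0]
    batch = {k: torch.tensor(v) for k, v in first_inputs.items()}  # e.g. input_ids / attention_mask
    return batch, first_label, saved["callback_info"][0]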