# tokenizer.py

import sys

import torch
from torch.utils.data import DataLoader, Dataset
from concurrent.futures import ThreadPoolExecutor
from transformers import BertTokenizer

sys.path.append("/home/cv/workspace/tujintao/document_segmentation")
from Utils.read_data import read_data, split_dataset

tokenizer = BertTokenizer.from_pretrained('Models/bert-base-chinese', use_fast=True, do_lower_case=True)


class DataSetForSingleDocumet(Dataset):
    def __init__(self, datasets, llabels, max_workers=10, max_seq_len=512, max_sentences_num=128):
        """
        :param datasets: list of documents, each a list of sentences
        :param llabels: per-document segmentation labels, one label per sentence
        :param max_workers: number of threads for the internal ThreadPoolExecutor
        :param max_seq_len: maximum token length per sentence
        :param max_sentences_num: maximum number of sentences per document
        """
        self.pool = ThreadPoolExecutor(max_workers=max_workers)
        self.documents = datasets
        self.llabels = llabels
        self.max_seq_len = max_seq_len
        # self.max_sentences_num = max_sentences_num

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, index):
        """
        A dataset holds many samples; one sample is a document made of several sentences, and each
        sentence should be encoded with its neighbouring sentences in mind. If each sentence is
        encoded independently instead, a bidirectional LSTM can be stacked on top of BERT.
        dataset: [[sent1, sent2, ...], ...]
        """
        sentences = self.documents[index]
        labels = self.llabels[index]
        inputs = tokenizer(sentences, padding='max_length', truncation=True, max_length=self.max_seq_len,
                           return_tensors='pt')
        # the tokenizer already returns LongTensors, so avoid re-wrapping them with torch.tensor()
        t_seqs = inputs['input_ids'].squeeze(0)
        t_seq_masks = inputs['attention_mask'].squeeze(0)
        t_labels = torch.tensor(labels, dtype=torch.long)
        return t_seqs, t_seq_masks, t_labels
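

# Hedged usage sketch (not part of the original pipeline): illustrates the tensor shapes a single
# __getitem__ call produces. The sample document, labels, and the helper name
# _example_single_document_item are assumptions for illustration only.
def _example_single_document_item():
    doc = ["第一句。", "第二句。"]  # hypothetical two-sentence document
    doc_labels = [0, 1]            # one segmentation label per sentence
    ds = DataSetForSingleDocumet([doc], [doc_labels], max_seq_len=96)
    seqs, masks, labs = ds[0]
    # expected: seqs and masks of shape (2, 96), labs of shape (2,)
    print(seqs.shape, masks.shape, labs.shape)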


def load_data(filepath, batch_size=1):
    """
    Load the data (txt files, read via read_data) and split it into train/valid/test sets.
    :param filepath: path of the data file
    :param batch_size: mini-batch size
    :return: train, valid and test iterators as DataLoaders.
             One sample is one whole document. Because of GPU memory limits, a single document is
             taken at a time, but its sentence count is unbounded, which can still use too much
             memory, so documents need to be split and their sentence count capped.
             (max_seq_len for BERT cannot exceed 512; it is set to 96 below.)
    """
    # ---------- read the txt data ----------
    all_documents, all_labels = read_data(filepath)
    train_data, valid_data, test_data = split_dataset(all_documents, all_labels)
    train_doc, train_seg_labels = train_data
    valid_doc, valid_seg_labels = valid_data
    test_doc, test_seg_labels = test_data
    print("Train size:", len(train_doc))
    print("Valid size:", len(valid_doc))
    print("Test size:", len(test_doc))
    # ---------- sentence-length statistics ----------
    # max_tl = 0
    # max_token = []
    # for sentences in all_documents:
    #     for one_sent in sentences:
    #         token_id = tokenizer.encode(one_sent)
    #         if len(token_id) > max_tl:
    #             max_tl = len(token_id)
    #             max_token = token_id
    # print("max sentence token length:", max_tl)  # 473
    # print("longest tokenized sentence:", tokenizer.decode(max_token))
    # -------------------------------------------------
    train_dataset = DataSetForSingleDocumet(train_doc, train_seg_labels, max_seq_len=96)
    valid_dataset = DataSetForSingleDocumet(valid_doc, valid_seg_labels, max_seq_len=96)
    test_dataset = DataSetForSingleDocumet(test_doc, test_seg_labels, max_seq_len=96)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size)
    # for batch in tqdm(test_dataloader):  # quick check
    #     # batch_labels has shape [1, 74]; the leading 1 is the batch dimension (batch_size=1)
    #     print(batch, batch[0].size())
    return train_dataloader, valid_dataloader, test_dataloader


# from Utils.configs import data_dir
# from tqdm import tqdm
# load_data(data_dir)
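

# Hedged sketch (not in the original code): the load_data docstring notes that long documents
# still exhaust GPU memory and need to be split, capping the number of sentences per sample.
# A helper along these lines could pre-split documents before building DataSetForSingleDocumet;
# the name split_long_documents and the simple consecutive-chunk strategy are assumptions,
# not the author's implementation.
def split_long_documents(documents, labels, max_sentences_num=128):
    chunked_docs, chunked_labels = [], []
    for doc, doc_labels in zip(documents, labels):
        # cut each document into consecutive chunks of at most max_sentences_num sentences,
        # keeping the sentence labels aligned with their sentences
        for start in range(0, len(doc), max_sentences_num):
            chunked_docs.append(doc[start:start + max_sentences_num])
            chunked_labels.append(doc_labels[start:start + max_sentences_num])
    return chunked_docs, chunked_labels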


if __name__ == '__main__':
    # quick smoke test of the tokenizer on a few sample sentences
    sentences = ["1963年出生,工科学士,高级工程师,北京物资学院客座副教授。",
                 "1985年8月—1993年在国家物资局、物资部、国内贸易部金属材料流通司从事国家统配钢材中特种钢材品种的全国调拔分配工作,先后任科员、副主任科员、主任科员。",
                 "1993年5月—1999年5月受国内贸易部委派到国内贸易部、冶金部、天津市政府共同领导组建的北洋(天津)钢材批发交易市场任理事长助理、副总裁。",
                 ]
    inputs = tokenizer(sentences, padding='max_length', truncation=True, max_length=96,
                       return_tensors='pt')
    print(inputs)
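    # Hedged check (assumption, not in the original code): with 3 sentences and max_length=96,
    # input_ids should have shape (3, 96)
    print(inputs['input_ids'].shape)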