vqa_dataset.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 3 Community License Agreement.
import copy

import torch
from datasets import load_dataset

# Check whether the system or user prompt header token sequence appears in the current token list.
def check_header(targets, seq):
    for i in range(len(seq) - 2):  # a full 3-token window starting at i needs i + 3 <= len(seq)
        if seq[i : i + 3] in targets:
            return True
    return False

# Replace every occurrence of the 3-token `target` header in `seq` with the loss-ignore index -100.
def replace_target(target, seq):
    for i in range(len(seq) - 2):
        if seq[i : i + 3] == target:
            seq[i], seq[i + 1], seq[i + 2] = -100, -100, -100
    return seq
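
# Illustrative example (not part of the original file): masking the assistant
# header in a short token list replaces exactly those three positions in place,
#   replace_target([128006, 78191, 128007], [1, 128006, 78191, 128007, 42])
#   -> [1, -100, -100, -100, 42]
# while check_header only tests membership, e.g.
#   check_header([[128006, 9125, 128007]], [5, 128006, 9125, 128007, 128009]) -> True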

def tokenize_dialogs(dialogs, images, processor):
    # Render the dialogs with the processor's chat template (Llama 3 family special tokens, vocab size above 128000).
    text_prompt = processor.apply_chat_template(dialogs)
    batch = processor(images=images, text=text_prompt, padding=True, return_tensors="pt")
    label_list = []
    for i in range(len(batch["input_ids"])):
        dialog_tokens = batch["input_ids"][i].tolist()
        labels = copy.copy(dialog_tokens)
        # 128009 is the <|eot_id|> token that closes each turn.
        eot_indices = [k for k, t in enumerate(labels) if t == 128009]
        last_idx = 0
        # system prompt header "<|start_header_id|>system<|end_header_id|>" is tokenized to [128006, 9125, 128007];
        # user prompt header "<|start_header_id|>user<|end_header_id|>" is tokenized to [128006, 882, 128007]
        prompt_header_seqs = [[128006, 9125, 128007], [128006, 882, 128007]]
        for idx in eot_indices:
            current_seq = labels[last_idx : idx + 1]
            if check_header(prompt_header_seqs, current_seq):
                # Found a prompt header, so this whole segment should be masked out of the loss.
                labels[last_idx : idx + 1] = [-100] * (idx - last_idx + 1)
            else:
                last_idx = idx + 1
        # Mask every assistant header "<|start_header_id|>assistant<|end_header_id|>", tokenized to [128006, 78191, 128007].
        assistant_header_seq = [128006, 78191, 128007]
        labels = replace_target(assistant_header_seq, labels)
        # Mask the padding token and the image token (index 128256).
        for j in range(len(labels)):
            if labels[j] == processor.tokenizer.pad_token_id or labels[j] == 128256:
                labels[j] = -100
        label_list.append(labels)
    batch["labels"] = torch.tensor(label_list)
    return batch
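
# Resulting label layout (illustrative, for a single-turn dialog):
#   system/user segments up to their <|eot_id|>        -> all -100 (prompt header found in segment)
#   assistant header [128006, 78191, 128007]           -> -100, -100, -100
#   assistant answer tokens, including its <|eot_id|>  -> kept; the loss is computed only here
#   padding tokens and image tokens (128256)           -> -100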

def get_custom_dataset(dataset_config, processor, split, split_ratio=0.9):
    # load_dataset returns a DatasetDict whose only split here is "train", so we
    # carve our own train/test split out of it; `split` must be "train" or "test".
    dataset_dict = load_dataset("HuggingFaceM4/the_cauldron", name="ai2d")
    dataset = dataset_dict["train"]
    dataset = dataset.train_test_split(test_size=1 - split_ratio, shuffle=True, seed=42)[split]
    return dataset
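
# Usage sketch (illustrative; the checkpoint name below is an assumption, any
# Llama 3.2 vision processor should work):
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
#   train_set = get_custom_dataset(None, processor, "train")  # dataset_config is unused here
#   eval_set = get_custom_dataset(None, processor, "test")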

class VQADataCollator:
    def __init__(self, processor):
        self.processor = processor
        self.processor.tokenizer.padding_side = "right"  # always pad on the right during training

    def __call__(self, samples):
        dialogs, images = [], []
        for sample in samples:
            image_list, sample_list = sample["images"], sample["texts"]
            if len(image_list) > 1:
                raise ValueError("Only one image per sample is supported")
            image = image_list[0].convert("RGB")
            dialog = []
            for sample_dict in sample_list:
                if not dialog:
                    # attach the image only to the first user turn
                    dialog += [
                        {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": sample_dict["user"].strip()}]},
                        {"role": "assistant", "content": [{"type": "text", "text": sample_dict["assistant"].strip()}]},
                    ]
                else:
                    dialog += [
                        {"role": "user", "content": [{"type": "text", "text": sample_dict["user"].strip()}]},
                        {"role": "assistant", "content": [{"type": "text", "text": sample_dict["assistant"].strip()}]},
                    ]
            dialogs.append(dialog)
            images.append([image])
        return tokenize_dialogs(dialogs, images, self.processor)

def get_data_collator(processor):
    return VQADataCollator(processor)
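
# End-to-end sketch (illustrative; builds on the processor and train_set from
# the usage sketch above):
#   from torch.utils.data import DataLoader
#   loader = DataLoader(train_set, batch_size=2, collate_fn=get_data_collator(processor))
#   batch = next(iter(loader))
#   # batch holds input_ids, attention_mask, pixel_values, ... plus the masked
#   # "labels" tensor produced by tokenize_dialogs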