from transformers import AutoTokenizer

# Load the tokenizer; model_path is a model id or local directory string.
# (Passing {model_path} would create a one-element set, which from_pretrained
# does not accept.)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Tokens we want to add to the vocabulary — a list of token strings.
# NOTE: feeding a bare string like "[NEW]" to set() below would split it
# into individual characters, so keep this a list.
new_tokens = ["[NEW]"]

# Drop any tokens that are already in the vocabulary.
new_tokens = set(new_tokens) - set(tokenizer.get_vocab().keys())

# Register the remaining tokens with the tokenizer, then grow the model's
# embedding matrix to match the enlarged vocabulary.  Skipping the resize
# makes lookups of the new token ids fail inside the embedding layer.
tokenizer.add_tokens(list(new_tokens))
model.resize_token_embeddings(len(tokenizer))  # `model` is assumed loaded earlier — TODO confirm
# If resize_token_embeddings(len(tokenizer)) is not called after adding tokens, an embedding (index) error occurs when the new token ids are used.
# For SBERT (SentenceTransformer) models:
# Add new special tokens to an SBERT model's underlying transformer.
special_tokens = ["[NEW]"]

# The first module of a SentenceTransformer wraps the HF tokenizer and
# backbone model; add the tokens there and resize its embedding matrix.
word_module = model._first_module()
word_module.tokenizer.add_tokens(special_tokens, special_tokens=True)
word_module.auto_model.resize_token_embeddings(len(word_module.tokenizer))

# Rebuild the sentence model from the resized transformer plus a mean-pooling
# layer sized to the word-embedding dimension.
pooling = models.Pooling(word_module.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_module, pooling])