607 | """Utility functions related to tokenisation."""
import logging
import re
import typing as t
import torch
from transformers import MistralCommonTokenizer
from .enums import GenerativeType
from .exceptions import InvalidModel
from .utils import log_once
if t.TYPE_CHECKING:
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from .data_models import DatasetConfig, ModelConfig
logger = logging.getLogger("euroeval")
def get_special_token_metadata(tokeniser: "PreTrainedTokenizerBase") -> dict:
"""Get the special token metadata for a tokeniser.
Args:
tokeniser:
The tokeniser.
Returns:
The special token metadata.
"""
# Create some test input IDs, to check if the tokeniser is adding special tokens
test_input_ids = tokeniser("Test").input_ids
# Extract the CLS token IDs from the tokeniser, if it's using them
has_cls_token = True
if tokeniser.cls_token_id in test_input_ids:
cls_token_id = tokeniser.cls_token_id
cls_token = tokeniser.cls_token
elif tokeniser.bos_token_id in test_input_ids:
cls_token_id = tokeniser.bos_token_id
cls_token = tokeniser.bos_token
elif tokeniser.cls_token is not None:
cls_token_id = tokeniser.cls_token_id
cls_token = tokeniser.cls_token
has_cls_token = False
else:
cls_token_id = tokeniser.bos_token_id
cls_token = tokeniser.bos_token
has_cls_token = False
# Extract the SEP token IDs from the tokeniser, if it's using them
has_sep_token = True
if tokeniser.sep_token_id in test_input_ids:
sep_token = tokeniser.sep_token
elif tokeniser.eos_token_id in test_input_ids:
sep_token = tokeniser.eos_token
elif tokeniser.sep_token is not None:
sep_token = tokeniser.sep_token
has_sep_token = False
else:
sep_token = tokeniser.eos_token
has_sep_token = False
return dict(
cls_token_id=cls_token_id,
cls_token=cls_token,
sep_token=sep_token,
has_cls_token=has_cls_token,
has_sep_token=has_sep_token,
)
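
# Illustrative sketch (not part of the module API): inspecting the special
# token metadata for an encoder checkpoint. The checkpoint name below is an
# assumption used purely for demonstration.
def _example_special_token_metadata() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("bert-base-cased")
    metadata = get_special_token_metadata(tokeniser=tokeniser)
    # For BERT-style tokenisers this typically yields cls_token="[CLS]" and
    # sep_token="[SEP]", with has_cls_token and has_sep_token both True.
    print(metadata)
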
def should_prompts_be_stripped(
labels_to_be_generated: list[str], tokeniser: "PreTrainedTokenizer"
) -> bool:
"""Determine if we should strip the prompts for few-shot evaluation.
    This is the case if the tokeniser needs to include the space as part of the
    label token. The strategy is thus to tokenise a label with a preceding colon
    (as in the prompts), i.e., ": positive", and check whether the tokenisation
    starts with the tokens of ": ". If it does, the prompts should not be
    stripped, since the tokeniser produces the whitespace token separately.
Args:
labels_to_be_generated:
The labels that are to be generated.
tokeniser:
The tokeniser used to tokenise the labels.
Returns:
Whether we should strip the prompts.
"""
strip_prompts = True
for label in labels_to_be_generated:
colon_tokens = tokeniser(": ", add_special_tokens=False).input_ids
label_tokens = tokeniser(": " + label, add_special_tokens=False).input_ids
if isinstance(colon_tokens, torch.Tensor):
colon_tokens = list(colon_tokens.squeeze(0))
if isinstance(label_tokens, torch.Tensor):
label_tokens = list(label_tokens.squeeze(0))
label_tokens_start_with_colon_tokens = (
label_tokens[: len(colon_tokens)] == colon_tokens
)
if label_tokens_start_with_colon_tokens:
strip_prompts = False
return strip_prompts
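
# Illustrative sketch (not part of the module API): checking whether few-shot
# prompts should be stripped for a given set of labels. The checkpoint name and
# labels are assumptions used purely for demonstration.
def _example_should_prompts_be_stripped() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("gpt2")
    strip = should_prompts_be_stripped(
        labels_to_be_generated=["positive", "negative"], tokeniser=tokeniser
    )
    # GPT-2's byte-level BPE merges the leading space into the label token, so
    # this is expected to return True for such tokenisers.
    print(strip)
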
def should_prefix_space_be_added_to_labels(
labels_to_be_generated: list[str], tokeniser: "PreTrainedTokenizer"
) -> bool:
"""Determine if we should add a prefix space to the labels.
This is the case if the prompts are stripped and the tokeniser doesn't
automatically add prefix whitespaces to the labels.
Args:
labels_to_be_generated:
The labels that are to be generated.
tokeniser:
The tokeniser used to tokenise the labels.
Returns:
Whether we should add a prefix space to the labels.
"""
if not should_prompts_be_stripped(
labels_to_be_generated=labels_to_be_generated, tokeniser=tokeniser
):
return False
whitespace_token = tokeniser.convert_ids_to_tokens(
ids=tokeniser(" ", add_special_tokens=False).input_ids[0]
)[0]
add_prefix_space = True
for label in labels_to_be_generated:
label_tokens = tokeniser(label, add_special_tokens=False).input_ids
if isinstance(label_tokens, torch.Tensor):
label_tokens = list(label_tokens.squeeze(0))
first_label_token: int = int(label_tokens[0])
first_character_of_label = tokeniser.convert_ids_to_tokens(first_label_token)[0]
has_prefix_space = first_character_of_label == whitespace_token
if has_prefix_space:
add_prefix_space = False
break
return add_prefix_space
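
# Illustrative sketch (not part of the module API): deciding whether a prefix
# space should be prepended to the labels before generation. The checkpoint
# name and labels are assumptions used purely for demonstration.
def _example_should_prefix_space_be_added() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("gpt2")
    add_space = should_prefix_space_be_added_to_labels(
        labels_to_be_generated=["positive", "negative"], tokeniser=tokeniser
    )
    # This is only True when the prompts are stripped *and* the tokeniser does
    # not already prepend a whitespace token to the labels itself.
    print(add_space)
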
def get_bos_token(
tokeniser: "PreTrainedTokenizer",
) -> tuple[str, int] | tuple[None, None]:
"""Get the beginning-of-sequence token from a tokeniser.
Args:
tokeniser:
The tokeniser.
Returns:
A pair (token, token_id) representing the beginning-of-sequence token and its
token ID, or (None, None) if no BOS token is found.
"""
if isinstance(tokeniser.bos_token, str) and isinstance(tokeniser.bos_token_id, int):
return tokeniser.bos_token, tokeniser.bos_token_id
vocab: dict[str, int] = tokeniser.get_vocab()
candidate_bos_tokens = ["<s>", "<|begin_of_text|>", "<|startoftext|>", "[CLS]"]
for candidate_bos_token in candidate_bos_tokens:
if candidate_bos_token in vocab:
bos_token = candidate_bos_token
bos_token_id = vocab[bos_token]
break
else:
log_once(
"The model does not have a beginning-of-sequence token. Please ensure that "
"this has been set in the tokeniser's configuration. Using no BOS token."
" This may lead to unexpected behavior in the model.",
level=logging.INFO,
)
return None, None
log_once(
f"Beginning-of-sequence token was not set, but detected it as {bos_token!r} "
f"with ID {bos_token_id}.",
level=logging.DEBUG,
)
return bos_token, bos_token_id
def get_eos_token(
tokeniser: "PreTrainedTokenizer",
) -> tuple[str, int] | tuple[None, None]:
"""Get the end-of-sequence token from a tokeniser.
Args:
tokeniser:
The tokeniser.
Returns:
A pair (token, token_id) representing the end-of-sequence token and its token
ID, or (None, None) if no EOS token is found.
"""
if isinstance(tokeniser.eos_token, str) and isinstance(tokeniser.eos_token_id, int):
return tokeniser.eos_token, tokeniser.eos_token_id
vocab: dict[str, int] = tokeniser.get_vocab()
candidate_eos_tokens = ["</s>", "<|end_of_text|>", "<|endoftext|>", "[SEP]"]
for candidate_eos_token in candidate_eos_tokens:
if candidate_eos_token in vocab:
eos_token = candidate_eos_token
eos_token_id = vocab[eos_token]
break
else:
log_once(
"The model does not have an end-of-sequence token. Please ensure that this "
"has been set in the tokeniser's configuration. Using no EOS token. This "
"may lead to unexpected behavior in the model.",
level=logging.INFO,
)
return None, None
log_once(
f"End-of-sequence token was not set, but detected it as {eos_token!r} with "
f"ID {eos_token_id}.",
level=logging.DEBUG,
)
return eos_token, eos_token_id
def get_pad_token(
tokeniser: "PreTrainedTokenizer",
) -> tuple[str, int] | tuple[None, None]:
"""Get the padding token from a tokeniser.
Args:
tokeniser:
The tokeniser.
Returns:
A pair (token, token_id) representing the padding token and its token ID, or
(None, None) if no padding token is found.
"""
# If the tokeniser already has a padding token, return it
if tokeniser.pad_token is not None and tokeniser.pad_token_id is not None:
assert isinstance(tokeniser.pad_token, str), (
"Expected tokeniser.pad_token to be a string, but got "
f"{type(tokeniser.pad_token)}."
)
assert isinstance(tokeniser.pad_token_id, int), (
"Expected tokeniser.pad_token_id to be an integer, but got "
f"{type(tokeniser.pad_token_id)}."
)
return (tokeniser.pad_token, tokeniser.pad_token_id)
# If the tokeniser has a BOS token, use it as the padding token
if tokeniser.bos_token is not None and tokeniser.bos_token_id is not None:
assert isinstance(tokeniser.bos_token, str), (
"Expected tokeniser.bos_token to be a string, but got "
f"{type(tokeniser.bos_token)}."
)
assert isinstance(tokeniser.bos_token_id, int), (
"Expected tokeniser.bos_token_id to be an integer, but got "
f"{type(tokeniser.bos_token_id)}."
)
pad_token = tokeniser.bos_token
pad_token_id = tokeniser.bos_token_id
# If the tokeniser has an EOS token, use it as the padding token
elif tokeniser.eos_token is not None and tokeniser.eos_token_id is not None:
assert isinstance(tokeniser.eos_token, str), (
"Expected tokeniser.eos_token to be a string, but got "
f"{type(tokeniser.eos_token)}."
)
assert isinstance(tokeniser.eos_token_id, int), (
"Expected tokeniser.eos_token_id to be an integer, but got "
f"{type(tokeniser.eos_token_id)}."
)
pad_token = tokeniser.eos_token
pad_token_id = tokeniser.eos_token_id
# Otherwise, try to find a candidate padding token in the vocabulary
else:
pad_token_candidates = [
"<pad>",
"[pad]",
"<|endoftext|>",
"<|end▁of▁sentence|>",
"<|im_end|>",
]
pad_token_candidates.extend([c.upper() for c in pad_token_candidates])
for candidate in pad_token_candidates:
if candidate in tokeniser.get_vocab():
pad_token = candidate
pad_token_id = tokeniser.get_vocab()[candidate]
break
else:
log_once(
"Could not identify a padding token for the model. Please ensure that "
"this has been set in the tokeniser's configuration. Using no padding "
"token. This may lead to unexpected behavior in the model.",
level=logging.INFO,
)
return None, None
log_once(
f"Padding token was not set, but detected it as {pad_token!r} with ID "
f"{pad_token_id}.",
level=logging.DEBUG,
)
return pad_token, pad_token_id
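
# Illustrative sketch (not part of the module API): the three helpers above
# share the same calling convention, returning either a (token, token_id) pair
# or (None, None). The checkpoint name is an assumption used purely for
# demonstration.
def _example_special_sequence_tokens() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("gpt2")
    bos_token, bos_token_id = get_bos_token(tokeniser=tokeniser)
    eos_token, eos_token_id = get_eos_token(tokeniser=tokeniser)
    pad_token, pad_token_id = get_pad_token(tokeniser=tokeniser)
    # GPT-2 has no dedicated padding token, so get_pad_token is expected to
    # fall back to another special token (here the BOS/EOS token).
    print(bos_token, eos_token, pad_token)
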
def get_end_of_chat_token_ids(
tokeniser: "PreTrainedTokenizer", generative_type: GenerativeType | None
) -> list[int] | None:
"""Get the end token ID for chat models.
This is only relevant for tokenisers with a chat template.
Args:
tokeniser:
The tokeniser.
generative_type:
The generative type, or None if not available.
Returns:
The token IDs used to end chats, or None if the tokeniser does not have a chat
template or if no end-of-chat token could be found.
"""
if generative_type == GenerativeType.BASE:
return None
user_message: dict[str, str] = dict(role="user", content="X")
try:
token_ids = apply_chat_template(
conversation=[user_message],
tokeniser=tokeniser,
tokenise=True,
add_generation_prompt=False,
enable_thinking=generative_type == GenerativeType.REASONING,
)
except InvalidModel as e:
if "does not have a chat template" in str(e):
return None
raise e
assert isinstance(token_ids, list)
for idx, token in enumerate(tokeniser.convert_ids_to_tokens(token_ids)):
if "X" in token:
x_token_index = idx
break
else:
logger.debug("Could not locate the end-of-chat token for the model.")
return None
end_of_chat_tokens = token_ids[x_token_index + 1 :]
if len(end_of_chat_tokens) == 0:
logger.debug("Could not locate the end-of-chat token for the model.")
return None
log_once(
f"Detected end-of-chat token IDs as {end_of_chat_tokens}, corresponding to "
f"tokens {tokeniser.convert_ids_to_tokens(end_of_chat_tokens)}.",
level=logging.DEBUG,
)
return end_of_chat_tokens
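
# Illustrative sketch (not part of the module API): extracting the token IDs
# that close a chat turn for an instruction-tuned model. The checkpoint name is
# an assumption used purely for demonstration.
def _example_end_of_chat_token_ids() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
    # Passing None for the generative type skips the base-model early exit
    # while keeping reasoning-specific handling disabled.
    token_ids = get_end_of_chat_token_ids(tokeniser=tokeniser, generative_type=None)
    # For a base model (GenerativeType.BASE) or a tokeniser without a chat
    # template, None is returned instead of a list of token IDs.
    print(token_ids)
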
def get_first_label_token_mapping(
dataset_config: "DatasetConfig",
model_config: "ModelConfig",
tokeniser: "PreTrainedTokenizer | None",
generative_type: "GenerativeType | None",
log_metadata: bool,
) -> dict[str, str] | bool:
"""Check if the model should output scores.
Args:
dataset_config:
The dataset configuration.
model_config:
The model configuration.
tokeniser:
The tokeniser, or None if not available.
generative_type:
The generative type, or None if not available.
log_metadata:
Whether to log metadata.
Returns:
        A mapping from labels to the first token in each label, or alternatively a
        Boolean value indicating whether the model should output scores (if the
        mapping is returned then the model will always output scores).
"""
if not (dataset_config.task.uses_logprobs and dataset_config.labels):
if log_metadata:
log_once(
"We will not use logprobs with the model, since the dataset does not "
"have labels.",
level=logging.DEBUG,
)
return False
elif generative_type == GenerativeType.REASONING:
if log_metadata:
log_once(
f"The model {model_config.model_id!r} is a reasoning model and "
"thus does not support logprobs, so we do not enable it.",
level=logging.DEBUG,
)
return False
elif tokeniser is None:
if log_metadata:
log_once(
f"We will use logprobs with the model {model_config.model_id!r} "
"since the dataset supports it and no tokeniser is available.",
level=logging.DEBUG,
)
return True
local_labels = [
dataset_config.prompt_label_mapping[label].strip()
for label in dataset_config.labels
]
# Tokenise some text containing each label, which we will use to extract the
# first token of each label
all_tokens: list[list[str]]
if not has_chat_template(tokeniser=tokeniser):
add_prefix_space = should_prefix_space_be_added_to_labels(
labels_to_be_generated=local_labels, tokeniser=tokeniser
)
all_tokens = [
tokeniser.tokenize(text=f" {label}" if add_prefix_space else label)
for label in local_labels
]
else:
all_tokens = [
tokeniser.convert_ids_to_tokens(
ids=apply_chat_template(
conversation=[
dict(role="user", content=""),
dict(role="assistant", content=label),
# Adding extra user message as Mistral tokenisers require
# conversations to end with a user message
dict(role="user", content=""),
],
tokeniser=tokeniser,
tokenise=True,
add_generation_prompt=True,
enable_thinking=generative_type == GenerativeType.REASONING,
)
)
for label in local_labels
]
# Remove any non-alphabetic characters from the tokens
all_tokens = [
[
re.sub(
pattern=r"^[^a-zæøåüöä0-9]+|[^a-zæøåüöä0-9]+$",
repl="",
string=token.lower(),
)
for token in token_list
]
for token_list in all_tokens
]
# Extract the first token of each label
first_tokens: list[str] = list()
for token_list, label in zip(all_tokens, local_labels):
matching_tokens = [tok for tok in token_list if tok and label.startswith(tok)]
if not matching_tokens:
if log_metadata:
log_once(
f"No matching token found in token_list for label '{label}', so "
"we will not use logprobs with the model.",
level=logging.DEBUG,
)
return False
first_tokens.append(matching_tokens[0])
# Build a mapping from labels to the first token in each label if the first
# tokens are distinct
if len(first_tokens) == len(set(first_tokens)):
mapping = {
label: first_token for label, first_token in zip(local_labels, first_tokens)
}
if log_metadata:
log_once(
"Using logprobs as evaluation strategy for the model, with the "
f"following mapping from labels to their first token: {mapping}.",
level=logging.DEBUG,
)
return mapping
else:
if log_metadata:
log_once(
"We will not use logprobs with the model since the first tokens of the "
"labels are not distinct. The first tokens for the labels "
f"{local_labels} are {first_tokens}"
)
return False
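
# Illustrative sketch (not part of the module API): how the return value of
# get_first_label_token_mapping drives the evaluation strategy. The
# dataset_config, model_config and tokeniser arguments are assumed to have been
# built elsewhere (e.g. during benchmark setup).
def _example_first_label_token_mapping(
    dataset_config: "DatasetConfig",
    model_config: "ModelConfig",
    tokeniser: "PreTrainedTokenizer",
) -> None:
    mapping = get_first_label_token_mapping(
        dataset_config=dataset_config,
        model_config=model_config,
        tokeniser=tokeniser,
        generative_type=None,
        log_metadata=False,
    )
    if isinstance(mapping, dict):
        # Logprobs can be used, scoring each label via its distinct first token.
        print("Label -> first token:", mapping)
    elif mapping:
        # Scores should still be requested, but without a label-token mapping.
        print("Using logprobs without a label-token mapping.")
    else:
        # Fall back to comparing generated text against the labels directly.
        print("Not using logprobs.")
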
def has_chat_template(tokeniser: "PreTrainedTokenizer") -> bool:
"""Check if a tokeniser has a chat template.
Args:
tokeniser:
The tokeniser.
Returns:
Whether the tokeniser has a chat template.
"""
if hasattr(tokeniser, "chat_template"):
has_template = tokeniser.chat_template is not None
if has_template:
log_once(
"The tokeniser has a chat template, so assuming that the model is "
"instruction tuned.",
level=logging.DEBUG,
)
return has_template
elif isinstance(tokeniser, MistralCommonTokenizer):
log_once(
"The tokeniser is a Mistral tokeniser, so assuming that the model is "
"instruction tuned.",
level=logging.DEBUG,
)
return True
else:
log_once(
"We cannot find a chat template for the tokeniser, so assuming that the "
"model isn't instruction tuned.",
level=logging.DEBUG,
)
return False
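
# Illustrative sketch (not part of the module API): has_chat_template is used
# to branch between plain tokenisation and chat-templated prompts. The
# checkpoint name is an assumption used purely for demonstration.
def _example_has_chat_template() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("gpt2")
    # GPT-2 ships without a chat template, so this is expected to be False.
    print(has_chat_template(tokeniser=tokeniser))
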
def apply_chat_template(
conversation: list[dict[str, str]],
tokeniser: "PreTrainedTokenizer",
tokenise: bool,
add_generation_prompt: bool,
enable_thinking: bool,
**extra_kwargs,
) -> str | list[int]:
"""Apply the chat template to a prompt.
Args:
conversation:
The conversation to apply the chat template to.
tokeniser:
The tokeniser.
tokenise:
Whether to tokenise the resulting prompt, returning a list of token IDs
instead of a string.
add_generation_prompt:
Whether to add a generation prompt at the end of the conversation. This is
only relevant for regular Hugging Face tokenisers, as Mistral tokenisers
always add a generation prompt.
enable_thinking:
Whether to enable special handling for reasoning models, such as adding
special tokens for thinking. This is only relevant for regular Hugging
Face tokenisers, as Mistral tokenisers always handle reasoning models.
**extra_kwargs:
Extra keyword arguments to pass to the tokeniser's `apply_chat_template`
method. Only relevant for regular Hugging Face tokenisers.
Returns:
The prompt with the chat template applied, either as a string or a list of
token IDs, depending on the value of `tokenise`.
Raises:
InvalidModel:
If the tokeniser does not have a chat template.
"""
# Ensure that the first user message is not empty, as this can cause issues with
# Jinja2
conversation[0]["content"] = conversation[0]["content"] or " "
if not has_chat_template(tokeniser=tokeniser):
raise InvalidModel(
"The tokeniser does not have a chat template, so cannot apply it."
)
elif isinstance(tokeniser, MistralCommonTokenizer):
templated_prompt = tokeniser.apply_chat_template(
conversation=conversation, tokenize=tokenise
)
else:
templated_prompt = tokeniser.apply_chat_template(
conversation=conversation,
add_generation_prompt=add_generation_prompt,
tokenize=tokenise,
enable_thinking=enable_thinking,
**extra_kwargs,
)
return templated_prompt
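
# Illustrative sketch (not part of the module API): applying the chat template
# to a single-turn conversation. The checkpoint name is an assumption used
# purely for demonstration.
def _example_apply_chat_template() -> None:
    from transformers import AutoTokenizer

    tokeniser = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
    conversation = [dict(role="user", content="What is the capital of Denmark?")]
    prompt = apply_chat_template(
        conversation=conversation,
        tokeniser=tokeniser,
        tokenise=False,
        add_generation_prompt=True,
        enable_thinking=False,
    )
    # With tokenise=True a list of token IDs is returned instead of a string.
    print(prompt)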