UnslothORPOTrainer.py

from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import functional as F
from trl.trainer.orpo_trainer import (
    Any, AutoModelForCausalLM, BaseImageProcessor, Callable, DPODataCollatorWithPadding,
    DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal,
    ORPOConfig, ORPOTrainer, Optional, PartialState, PeftModel, PreTrainedModel,
    PreTrainedModelWrapper, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback,
    Union, add_bos_token_if_needed, add_eos_token_if_needed, amp, deepcopy, defaultdict,
    disable_dropout_in_model, generate_model_card, get_comet_experiment_url, inspect,
    is_comet_available, is_peft_available, is_torch_fx_proxy, is_torch_xla_available,
    is_wandb_available, log_table_to_comet_experiment, maybe_apply_chat_template,
    maybe_extract_prompt, nn, np, nullcontext, os, pad_to_length, pd,
    peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch,
    transformers, version, warnings,
)
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version

torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}

@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options)
def selective_log_softmax(logits, index):
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # Alternative that loops to reduce peak memory consumption:
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim = -1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
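
# A minimal sanity-check sketch (illustrative, not part of the generated file): the selective
# version should match gathering from a full `F.log_softmax`, while never materializing the
# full (batch, seq, vocab) log-softmax tensor. The shapes and sizes below are assumptions.
#
#     logits = torch.randn(2, 8, 128)          # (batch, seq, vocab)
#     index  = torch.randint(0, 128, (2, 8))   # token ids to score
#     ref = F.log_softmax(logits.float(), dim = -1).gather(-1, index.unsqueeze(-1)).squeeze(-1)
#     torch.testing.assert_close(selective_log_softmax(logits, index), ref)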
@dataclass
class UnslothORPOConfig(ORPOConfig):
    """
    Configuration class for the [`ORPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you
            want to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data
            collator and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the relative ratio loss weight in the ORPO loss. In the
            [paper](https://huggingface.co/papers/2403.07691), it is denoted by λ. In the
            [code](https://github.com/xfactlab/orpo), it is denoted by `alpha`.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or
            `"keep_start"`. This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model`
            argument, you need to specify if the model returned by the callable is an encoder-decoder model.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
    """
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )

    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = True,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        evaluation_strategy = None,
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        dispatch_batches = None,
        split_batches = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        max_length = 1024,
        max_prompt_length = 512,
        max_completion_length = None,
        beta = 0.1,
        disable_dropout = True,
        label_pad_token_id = -100,
        padding_value = None,
        truncation_mode = 'keep_end',
        generate_during_eval = False,
        is_encoder_decoder = None,
        model_init_kwargs = None,
        dataset_num_proc = None,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        if learning_rate < 1e-7:
            raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1:
            raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            evaluation_strategy = evaluation_strategy,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            dispatch_batches = dispatch_batches,
            split_batches = split_batches,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            max_length = max_length,
            max_prompt_length = max_prompt_length,
            max_completion_length = max_completion_length,
            beta = beta,
            disable_dropout = disable_dropout,
            label_pad_token_id = label_pad_token_id,
            padding_value = padding_value,
            truncation_mode = truncation_mode,
            generate_during_eval = generate_during_eval,
            is_encoder_decoder = is_encoder_decoder,
            model_init_kwargs = model_init_kwargs,
            dataset_num_proc = dataset_num_proc,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
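
# A minimal usage sketch (illustrative; the argument values below are assumptions for
# demonstration, not defaults from this file). The config is a drop-in `ORPOConfig` plus the
# two Unsloth-specific fields:
#
#     config = UnslothORPOConfig(
#         output_dir = "orpo_outputs",
#         per_device_train_batch_size = 2,
#         gradient_accumulation_steps = 4,
#         learning_rate = 5e-6,        # must lie in (1e-7, 1], enforced in __init__ above
#         beta = 0.1,                  # λ in the ORPO paper: weight of the odds-ratio term
#         max_length = 1024,
#         max_prompt_length = 512,
#         unsloth_num_chunks = -1,     # -1 keeps the most memory-efficient chunking
#     )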
class _UnslothORPOTrainer(Trainer):
    _tag_names = ["trl", "orpo"]

    def __init__(
        self,
        model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
        args: Optional[ORPOConfig] = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[
            Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
        ] = None,
        model_init: Optional[Callable[[], PreTrainedModel]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        peft_config: Optional[dict] = None,
        compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
    ):
        if args.model_init_kwargs is None:
            model_init_kwargs = {}
        elif not isinstance(model, str):
            raise ValueError("You passed model_init_kwargs to the ORPOTrainer, but your model is already instantiated.")
        else:
            model_init_kwargs = args.model_init_kwargs
            torch_dtype = model_init_kwargs.get("torch_dtype")
            if torch_dtype is not None:
                # Convert to `torch.dtype` if a string is passed
                if isinstance(torch_dtype, str) and torch_dtype != "auto":
                    torch_dtype = getattr(torch, torch_dtype)
                if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
                    raise ValueError(
                        f"Invalid `torch_dtype` passed to the ORPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
                    )
                model_init_kwargs["torch_dtype"] = torch_dtype

        if isinstance(model, str):
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)

        # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
        # has been called in order to properly call autocast if needed.
        self._peft_has_been_casted_to_bf16 = False

        if not is_peft_available() and peft_config is not None:
            raise ValueError(
                "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
            )
        elif is_peft_available() and peft_config is not None:
            # if model is a peft model and we have a peft_config, we merge and unload it first
            if isinstance(model, PeftModel):
                model = model.merge_and_unload()

            if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
                _support_gc_kwargs = hasattr(
                    args, "gradient_checkpointing_kwargs"
                ) and "gradient_checkpointing_kwargs" in list(
                    inspect.signature(prepare_model_for_kbit_training).parameters
                )

                prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}

                if _support_gc_kwargs:
                    prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs

                model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
            elif getattr(args, "gradient_checkpointing", False):
                # For backward compatibility with older versions of transformers
                if hasattr(model, "enable_input_require_grads"):
                    model.enable_input_require_grads()
                else:

                    def make_inputs_require_grad(module, input, output):
                        output.requires_grad_(True)

                    model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

            # get peft model with the given config (Unsloth applies PEFT outside this trainer,
            # so the model is intentionally left unchanged here)
            model = model
            if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
                peft_module_casting_to_bf16(model)
                # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
                self._peft_has_been_casted_to_bf16 = True

        # For models that use gradient_checkpointing, we need to attach a hook that enables input
        # to explicitly have `requires_grad=True`, otherwise training will either silently
        # fail or completely fail.
        elif getattr(args, "gradient_checkpointing", False):
            # For backward compatibility with older versions of transformers
            if hasattr(model, "enable_input_require_grads"):
                model.enable_input_require_grads()
            else:

                def make_inputs_require_grad(module, input, output):
                    output.requires_grad_(True)

                model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
            raise ValueError(
                "`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
                " Please install `wandb` or `comet-ml` to resolve."
            )

        if model is not None:
            self.is_encoder_decoder = model.config.is_encoder_decoder
        elif args.is_encoder_decoder is None:
            raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
        else:
            self.is_encoder_decoder = args.is_encoder_decoder

        if self.is_encoder_decoder:
            self.decoder_start_token_id = model.config.decoder_start_token_id
            self.pad_token_id = model.config.pad_token_id

        if processing_class is None:
            raise ValueError("processing_class must be specified to tokenize an ORPO dataset.")
        if args.max_length is None:
            warnings.warn(
                "`max_length` is not set in the ORPOConfig's init; it will default to `512`, but you should set"
                " it yourself in the future.",
                UserWarning,
            )
            max_length = 512
        else:
            max_length = args.max_length
        if args.max_prompt_length is None:
            warnings.warn(
                "`max_prompt_length` is not set in the ORPOConfig's init; it will default to `128`, but you"
                " should set it yourself in the future.",
                UserWarning,
            )
            max_prompt_length = 128
        else:
            max_prompt_length = args.max_prompt_length

        if args.max_completion_length is None and self.is_encoder_decoder:
            warnings.warn(
                "When using an encoder-decoder architecture, you should set `max_completion_length` in the"
                " ORPOConfig's init; it will default to `128`, but you should set it yourself in the future.",
                UserWarning,
            )
            self.max_completion_length = 128
        else:
            self.max_completion_length = args.max_completion_length

        if data_collator is None:
            data_collator = DPODataCollatorWithPadding(
                pad_token_id=processing_class.pad_token_id,
                label_pad_token_id=args.label_pad_token_id,
                is_encoder_decoder=self.is_encoder_decoder,
            )

            if args.remove_unused_columns:
                args.remove_unused_columns = False
                # warn users
                warnings.warn(
                    "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your"
                    " TrainingArguments; we have set it for you, but you should do it yourself in the future.",
                    UserWarning,
                )

            self.use_dpo_data_collator = True
        else:
            self.use_dpo_data_collator = False

        # Disable dropout in the model and reference model
        if args.disable_dropout:
            disable_dropout_in_model(model)

        self.max_length = max_length
        self.generate_during_eval = args.generate_during_eval
        self.label_pad_token_id = args.label_pad_token_id
        self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
        self.max_prompt_length = max_prompt_length
        self.truncation_mode = args.truncation_mode
        self.processing_class = processing_class

        self.beta = args.beta
        self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
        self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
        if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
            warnings.warn(
                "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
                "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
                "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
                "loss.",
                UserWarning,
            )

        self._stored_metrics = defaultdict(lambda: defaultdict(list))

        # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
        # input tensor associated with the key "input_ids". However, in ORPO, the sampled data does not include the
        # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
        # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
        # of the input, floating-point operations will not be computed." To suppress this warning, we set the
        # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
        # that the warning has already been issued.
        model.warnings_issued["estimate_tokens"] = True

        # Compute that only on the main process for faster data processing.
        # see: https://github.com/huggingface/trl/pull/1255
        with PartialState().local_main_process_first():
            # Extract the prompt if needed, and apply the chat template if needed
            train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
            train_dataset = train_dataset.map(
                maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
            )
            train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
            if eval_dataset is not None:
                eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
                eval_dataset = eval_dataset.map(
                    maybe_apply_chat_template,
                    fn_kwargs={"tokenizer": processing_class},
                    num_proc=args.dataset_num_proc,
                )
                eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)

        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            model_init=model_init,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )

        # Add tags for models that have been loaded with the correct transformers version
        if hasattr(self.model, "add_model_tags"):
            self.model.add_model_tags(self._tag_names)

        if not hasattr(self, "accelerator"):
            raise AttributeError(
                "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
            )
    def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
        # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
        deepspeed_plugin = self.accelerator.state.deepspeed_plugin
        config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)

        if model is not None:
            if hasattr(model, "config"):
                hidden_size = (
                    max(model.config.hidden_sizes)
                    if getattr(model.config, "hidden_sizes", None)
                    else getattr(model.config, "hidden_size", None)
                )
                if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
                    # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like:
                    # `Invalidate trace cache @ step 0: expected module 1, but got module 0`
                    # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
                    config_kwargs.update(
                        {
                            "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
                            "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
                            "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
                        }
                    )

        # If ZeRO-3 is used, we shard both the active and reference model.
        # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
        if config_kwargs["zero_optimization"]["stage"] != 3:
            config_kwargs["zero_optimization"]["stage"] = 0

        model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
        model.eval()
        return model
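
    # Note (a sketch of the rationale, mirroring `accelerate`'s DeepSpeed preparation): under
    # ZeRO-3 the bucket sizes above follow the usual width-based heuristics, i.e.
    # reduce_bucket_size = hidden_size**2, stage3_prefetch_bucket_size = 0.9 * hidden_size**2,
    # and stage3_param_persistence_threshold = 10 * hidden_size.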
    def build_tokenized_answer(self, prompt, answer):
        """
        Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`.
        It does ensure `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`.
        Reference:
            https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
        """
        full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
        prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]

        answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
        answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]

        # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
        full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])

        # Prepare input tokens for token by token comparison
        full_input_ids = np.array(full_tokenized["input_ids"])

        if len(full_input_ids) != len(full_concat_input_ids):
            raise ValueError("Prompt input ids and answer input ids should have the same length.")

        # On some tokenizers, like the Llama-2 tokenizer, there are occasions where tokens
        # can be merged together when tokenizing prompt+answer. This could result
        # in the last token from the prompt being different when tokenized on its own
        # vs when done as prompt+answer.
        response_token_ids_start_idx = len(prompt_input_ids)

        # If the tokenized prompt differs from the prompt portion of prompt+answer, then the
        # last token has changed due to merging.
        if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
            response_token_ids_start_idx -= 1

        prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
        prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]

        if len(prompt_input_ids) != len(prompt_attention_mask):
            raise ValueError("Prompt input ids and attention mask should have the same length.")

        answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
        answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]

        return dict(
            prompt_input_ids=prompt_input_ids,
            prompt_attention_mask=prompt_attention_mask,
            input_ids=answer_input_ids,
            attention_mask=answer_attention_mask,
        )
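
    # Illustrative note (an assumption about BPE-style tokenizers such as Llama's): tokenizing
    # `prompt + answer` in one call may merge tokens across the boundary, so the last prompt
    # token inside `enc(prompt + answer)` can differ from the last token of `enc(prompt)`.
    # The index adjustment above therefore re-derives the prompt/answer split from the joint
    # tokenization instead of trusting `len(enc(prompt))`.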
    def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
        """Tokenize a single row from an ORPO specific dataset.

        At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
        in case the prompt + chosen or prompt + rejected responses is/are too long. First
        we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

        We also create the labels for the chosen/rejected responses, which are of length equal to
        the sum of the length of the prompt and the chosen/rejected response, with
        label_pad_token_id for the prompt tokens.
        """
        batch = {}
        prompt = feature["prompt"]
        chosen = feature["chosen"]
        rejected = feature["rejected"]

        if not self.is_encoder_decoder:
            # Check issues below for more details
            #  1. https://github.com/huggingface/trl/issues/907
            #  2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
            #  3. https://github.com/LianjiaTech/BELLE/issues/337

            if not isinstance(prompt, str):
                raise ValueError(f"prompt should be an str but got {type(prompt)}")
            prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
            prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}

            if not isinstance(chosen, str):
                raise ValueError(f"chosen should be an str but got {type(chosen)}")
            chosen_tokens = self.build_tokenized_answer(prompt, chosen)

            if not isinstance(rejected, str):
                raise ValueError(f"rejected should be an str but got {type(rejected)}")
            rejected_tokens = self.build_tokenized_answer(prompt, rejected)

            # Last prompt token might get merged by tokenizer and
            # it should not be included for generation if that happens
            prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])

            chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
            rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
            prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)

            for k, v in prompt_tokens.items():
                prompt_tokens[k] = v[:prompt_len_input_ids]

            # Make sure prompts only differ by one token at most,
            # and that their lengths only differ by 1 at most
            num_diff_tokens = sum(
                [a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])]
            )
            num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
            if num_diff_tokens > 1 or num_diff_len > 1:
                raise ValueError(
                    "Chosen and rejected prompt_input_ids might only differ on the "
                    "last token due to tokenizer merge ops."
                )

            # add BOS token to head of prompt. Avoid adding if it's already there
            prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
                self.processing_class.bos_token_id,
                prompt_len_input_ids,
                prompt_tokens,
                chosen_prompt_len_input_ids,
                chosen_tokens,
                rejected_prompt_len_input_ids,
                rejected_tokens,
            )

            # add EOS token to end of answer. Avoid adding if it's already there
            chosen_tokens, rejected_tokens = add_eos_token_if_needed(
                self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
            )

            longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))

            # if combined sequence is too long, truncate the prompt
            for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    if self.truncation_mode == "keep_start":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
                    elif self.truncation_mode == "keep_end":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
                    else:
                        raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")

            # if that's still too long, truncate the response
            for answer_tokens in [chosen_tokens, rejected_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    for k in ["input_ids", "attention_mask"]:
                        answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]

            # Create labels
            chosen_sequence_tokens = {
                k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            rejected_sequence_tokens = {
                k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
            chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(chosen_tokens["prompt_input_ids"])
            rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
            rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(rejected_tokens["prompt_input_ids"])

            for k, toks in {
                "chosen_": chosen_sequence_tokens,
                "rejected_": rejected_sequence_tokens,
                "": prompt_tokens,
            }.items():
                for type_key, tokens in toks.items():
                    if type_key == "token_type_ids":
                        continue
                    batch[f"{k}{type_key}"] = tokens

        else:
            chosen_tokens = self.processing_class(
                chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            rejected_tokens = self.processing_class(
                rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            prompt_tokens = self.processing_class(
                prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
            )

            batch["chosen_labels"] = chosen_tokens["input_ids"]
            batch["rejected_labels"] = rejected_tokens["input_ids"]
            batch["prompt_input_ids"] = prompt_tokens["input_ids"]
            batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]

            if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
                batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["rejected_labels"])
                )
                batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["chosen_labels"])
                )

        if is_torch_xla_available():
            # Pad the sequences to global max_length to avoid TorchXLA recompilation
            for k in batch:
                if "labels" in k or self.is_encoder_decoder:
                    pad_value = self.label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = self.padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                batch[k] = batch[k] + [pad_value] * (self.max_length - len(batch[k]))
        return batch
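
    # Layout sketch of the result for a decoder-only model (derived from the loop above): `batch`
    # holds "prompt_input_ids" / "prompt_attention_mask" plus, for each of the "chosen_" and
    # "rejected_" prefixes, the full prompt+response "input_ids", "attention_mask", and "labels",
    # where prompt positions in the labels are set to `label_pad_token_id` so that log-prob
    # computations only score response tokens.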
    @staticmethod
    def concatenated_inputs(
        batch: dict[str, Union[list, torch.LongTensor]],
        is_encoder_decoder: bool = False,
        label_pad_token_id: int = -100,
        padding_value: int = 0,
        device: Optional[torch.device] = None,
    ) -> dict[str, torch.LongTensor]:
        """Concatenate the chosen and rejected inputs into a single tensor.

        Args:
            batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
            is_encoder_decoder: Whether the model is an encoder-decoder model.
            label_pad_token_id: The label pad token id.
            padding_value: The padding value to use for the concatenated input_ids.
            device: The device for the concatenated inputs.

        Returns:
            A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
        """
        concatenated_batch = {}

        if is_encoder_decoder:
            max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
        else:
            max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])

        for k in batch:
            if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("chosen", "concatenated")
                concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
        for k in batch:
            if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("rejected", "concatenated")
                concatenated_batch[concatenated_key] = torch.cat(
                    (
                        concatenated_batch[concatenated_key],
                        pad_to_length(batch[k], max_length, pad_value=pad_value),
                    ),
                    dim=0,
                ).to(device=device)

        if is_encoder_decoder:
            concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
            concatenated_batch["concatenated_attention_mask"] = (
                batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
            )

        return concatenated_batch
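
    # Shape sketch (illustrative): with chosen ids of shape (B, Lc) and rejected ids of shape
    # (B, Lr), "concatenated_input_ids" has shape (2B, max(Lc, Lr)); rows [0:B] are the padded
    # chosen sequences and rows [B:2B] the padded rejected ones, so one forward pass scores both.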
    def odds_ratio_loss(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Compute ORPO's odds ratio (OR) loss for a batch of policy model log probabilities.

        Args:
            policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
            policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)

        Returns:
            A tuple of five tensors: (losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen).
            The losses tensor contains the ORPO loss for each example in the batch.
            The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
            log_odds_ratio is the mean `log(sigmoid(log_odds))` and log_odds_chosen is the mean log odds ratio of the
            chosen over the rejected responses; both are for logging purposes.
        """
        # Derived from Eqs. (4) and (7) from https://huggingface.co/papers/2403.07691 by using log identities and exp(log(P(y|x))) = P(y|x)
        log_odds = (policy_chosen_logps - policy_rejected_logps) - (
            torch.log1p(-torch.exp(policy_chosen_logps)) - torch.log1p(-torch.exp(policy_rejected_logps))
        )
        ratio = F.logsigmoid(log_odds)
        losses = self.beta * ratio

        chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach()
        rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach()

        return losses, chosen_rewards, rejected_rewards, torch.mean(ratio), torch.mean(log_odds)
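
    # Worked form (a sketch from Eq. (4) of the ORPO paper): with odds(y|x) = P(y|x) / (1 - P(y|x)),
    #     log_odds = log[odds(y_w|x) / odds(y_l|x)]
    #              = (log P(y_w|x) - log P(y_l|x)) - (log(1 - P(y_w|x)) - log(1 - P(y_l|x))),
    # where the 1 - P terms are computed above via `log1p(-exp(logp))`. Note that `losses` is
    # beta * logsigmoid(log_odds) (a log-probability, hence <= 0); `get_batch_loss_metrics`
    # subtracts its mean from the NLL term, so the effective per-example penalty is
    # -beta * logsigmoid(log_odds).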
    @staticmethod
    def get_batch_logps(
        logits: torch.FloatTensor,
        labels: torch.LongTensor,
        average_log_prob: bool = False,
        label_pad_token_id: int = -100,
        is_encoder_decoder: bool = False,
    ) -> torch.FloatTensor:
        """Compute the log probabilities of the given labels under the given logits.

        Args:
            logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
            labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
            average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
            label_pad_token_id: The label pad token id.
            is_encoder_decoder: Whether the model is an encoder-decoder model.

        Returns:
            A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
        """
        if logits.shape[:-1] != labels.shape:
            raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")

        if not is_encoder_decoder:
            labels = labels[:, 1:].clone()
            logits = logits[:, :-1, :]
        loss_mask = labels != label_pad_token_id

        # dummy token; we'll ignore the losses on these tokens later
        labels = torch.where(labels == label_pad_token_id, 0, labels)

        per_token_logps = selective_log_softmax(logits, labels)

        if average_log_prob:
            return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
        else:
            return (per_token_logps * loss_mask).sum(-1)
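
    # Illustrative example (shapes are assumptions, not from the original file): for a row whose
    # shifted labels are [-100, -100, 5, 7], `loss_mask` keeps only the last two positions, so
    # `average_log_prob=True` returns (logp[5] + logp[7]) / 2 for that row. `concatenated_forward`
    # below calls this with `average_log_prob=True`, making the odds-ratio term length-normalized.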
    def concatenated_forward(
        self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.

        We do this to avoid doing two forward passes, because it's faster for FSDP.
        """
        concatenated_batch = self.concatenated_inputs(
            batch,
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
            padding_value=self.padding_value,
            device=self.accelerator.device,
        )
        len_chosen = batch["chosen_labels"].shape[0]

        model_kwargs = (
            {
                "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]),
            }
            if self.is_encoder_decoder
            else {}
        )

        if self.aux_loss_enabled:
            model_kwargs["output_router_logits"] = True

        outputs = model(
            concatenated_batch["concatenated_input_ids"],
            attention_mask=concatenated_batch["concatenated_attention_mask"],
            use_cache=False,
            **model_kwargs,
        )
        all_logits = outputs.logits

        def cross_entropy_loss(logits, labels):
            if not self.is_encoder_decoder:
                # Shift so that tokens < n predict n
                logits = logits[..., :-1, :].contiguous()
                labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            logits = logits.view(-1, logits.shape[-1])
            labels = labels.view(-1)
            # Enable model parallelism
            labels = labels.to(logits.device)
            loss = loss_fct(logits, labels)
            return loss

        if self.is_encoder_decoder:
            labels = concatenated_batch["concatenated_labels"].clone()
        else:
            labels = concatenated_batch["concatenated_input_ids"].clone()
            attention_mask = concatenated_batch["concatenated_attention_mask"]
            labels = torch.where(attention_mask == 1, labels, self.label_pad_token_id)

        # ORPO's chosen NLL loss is computed over the full prompt and response
        chosen_nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen])

        all_logps = self.get_batch_logps(
            all_logits,
            concatenated_batch["concatenated_labels"],
            average_log_prob=True,
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
        )

        chosen_logps = all_logps[:len_chosen]
        rejected_logps = all_logps[len_chosen:]

        if not self.is_encoder_decoder:
            chosen_logits = all_logits[:len_chosen, :-1, :]
            rejected_logits = all_logits[len_chosen:, :-1, :]
        else:
            chosen_logits = all_logits[:len_chosen]
            rejected_logits = all_logits[len_chosen:]

        if self.aux_loss_enabled:
            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss, outputs.aux_loss)

        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss)
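
    # Flow sketch: a single forward pass over the 2 * B concatenated rows produces `all_logits`;
    # the first `len_chosen` rows also feed the chosen-only NLL term, while `get_batch_logps`
    # converts both halves into per-sequence average log-probs for the odds-ratio loss.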
    def get_batch_loss_metrics(
        self,
        model,
        batch: dict[str, Union[list, torch.LongTensor]],
        train_eval: Literal["train", "eval"] = "train",
    ):
        """Compute the ORPO loss and other metrics for the given batch of inputs for train or test."""
        metrics = {}

        forward_output = self.concatenated_forward(model, batch)
        (
            policy_chosen_logps,
            policy_rejected_logps,
            policy_chosen_logits,
            policy_rejected_logits,
            policy_nll_loss,
        ) = forward_output[:5]
        if self.aux_loss_enabled:
            aux_loss = forward_output[5]

        losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen = self.odds_ratio_loss(
            policy_chosen_logps, policy_rejected_logps
        )
        # full ORPO loss
        loss = policy_nll_loss - losses.mean()

        reward_accuracies = (chosen_rewards > rejected_rewards).float()

        prefix = "eval_" if train_eval == "eval" else ""
        metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean()
        metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean()
        metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean()
        metrics[f"{prefix}rewards/margins"] = self.accelerator.gather_for_metrics(
            chosen_rewards - rejected_rewards
        ).mean()
        metrics[f"{prefix}logps/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean()
        metrics[f"{prefix}logps/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean()
        metrics[f"{prefix}logits/rejected"] = (
            self.accelerator.gather_for_metrics(policy_rejected_logits).detach().mean()
        )
        metrics[f"{prefix}logits/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logits).detach().mean()
        metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean()
        metrics[f"{prefix}log_odds_ratio"] = self.accelerator.gather_for_metrics(log_odds_ratio).mean()
        metrics[f"{prefix}log_odds_chosen"] = self.accelerator.gather_for_metrics(log_odds_chosen).mean()
        if is_torch_xla_available():
            xm.mark_step()  # needed because of the `.item()` calls below
        for k, v in metrics.items():
            metrics[k] = v.item()
        if self.aux_loss_enabled:
            loss += self.aux_loss_coef * aux_loss

        return loss, metrics
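
    # Putting the pieces together (this mirrors the computation above): the full ORPO objective is
    #     loss = NLL(chosen) - beta * mean(logsigmoid(log_odds(chosen vs. rejected)))
    # plus `aux_loss_coef * aux_loss` when router logits are enabled, i.e. a standard SFT loss on
    # the chosen responses plus the odds-ratio preference penalty.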
    def compute_loss(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: dict[str, Union[torch.Tensor, Any]],
        return_outputs=False,
        num_items_in_batch=None,
    ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
        compute_loss_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
        with compute_loss_context_manager:
            loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")

        # Move the loss back to the device where the `Trainer` class accumulates the loss
        loss = loss.to(self.args.device)
        # force log the metrics
        self.store_metrics(metrics, train_eval="train")

        if return_outputs:
            return (loss, metrics)
        return loss
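    # Sketch of the autocast guard used by `compute_loss`: when a PEFT model has
    # been cast to bf16, the forward pass runs under torch AMP; otherwise a no-op
    # context is used. `peft_casted` stands in for `self._peft_has_been_casted_to_bf16`.
    #
    #     from contextlib import nullcontext
    #     from torch import amp
    #     peft_casted = False
    #     ctx = amp.autocast("cuda") if peft_casted else nullcontext()
    #     with ctx:
    #         ...  # loss computation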
    def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str:
        """Generate samples from the model for the given batch of inputs."""

        # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
        # the torch cuda amp context manager, as some hidden states are silently cast to full precision.
        generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

        with generate_context_manager:
            policy_output = model.generate(
                input_ids=batch["prompt_input_ids"],
                attention_mask=batch["prompt_attention_mask"],
                max_length=self.max_length,
                do_sample=True,
                pad_token_id=self.processing_class.pad_token_id,
            )

        policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
        policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)

        return policy_output_decoded
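    # Hedged sketch of the padding step after `model.generate` above: sequences
    # shorter than `max_length` are right-padded with the pad token so the whole
    # batch can be decoded together. This mimics what `pad_to_length` does; the
    # helper and values here are illustrative.
    #
    #     import torch
    #     def pad_to_length_sketch(t, length, pad_value):
    #         if t.size(-1) >= length:
    #             return t
    #         pad = torch.full((*t.shape[:-1], length - t.size(-1)), pad_value, dtype=t.dtype)
    #         return torch.cat([t, pad], dim=-1)
    #
    #     pad_to_length_sketch(torch.tensor([[1, 2, 3]]), 5, 0)  # tensor([[1, 2, 3, 0, 0]])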
    def prediction_step(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[list[str]] = None,
    ):
        if not self.use_dpo_data_collator:
            warnings.warn(
                "prediction_step is only implemented for DPODataCollatorWithPadding; you passed a different "
                "data collator, so you might see unexpected behavior. Alternatively, implement your own "
                "prediction_step method if you are using a custom data collator."
            )
        if ignore_keys is None:
            if hasattr(model, "config"):
                ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []

        prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

        with torch.no_grad(), prediction_context_manager:
            loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")

        # force log the metrics
        self.store_metrics(metrics, train_eval="eval")

        if prediction_loss_only:
            return (loss.detach(), None, None)

        # logits for the chosen and rejected samples from model
        logits_dict = {
            "eval_logits/chosen": metrics["eval_logits/chosen"],
            "eval_logits/rejected": metrics["eval_logits/rejected"],
        }
        # metrics values are plain floats by this point (converted via `.item()` in
        # `get_batch_loss_metrics`), so build a tensor directly rather than unsqueezing
        logits = torch.tensor(
            [v for k, v in logits_dict.items() if k not in ignore_keys], device=self.accelerator.device
        )
        labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

        return (loss.detach(), logits, labels)
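    # Sketch of the packaging at the end of `prediction_step`: the two scalar
    # logit metrics become a (2,) tensor with dummy zero labels so the base
    # `Trainer` evaluation loop has tensors to gather. Values are illustrative.
    #
    #     import torch
    #     metrics = {"eval_logits/chosen": 1.5, "eval_logits/rejected": 0.5}
    #     logits = torch.tensor([metrics["eval_logits/chosen"], metrics["eval_logits/rejected"]])
    #     labels = torch.zeros(logits.shape[0])        # shape (2,)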
    def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
        for key, value in metrics.items():
            self._stored_metrics[train_eval][key].append(value)
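    # `_stored_metrics` is assumed to be a nested mapping of lists, e.g. built as
    # `defaultdict(lambda: defaultdict(list))` elsewhere in this file, so each
    # batch appends its metric value and `log` later averages them:
    #
    #     from collections import defaultdict
    #     _stored_metrics = defaultdict(lambda: defaultdict(list))
    #     _stored_metrics["train"]["rewards/margins"].append(0.2)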
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[list[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Overriding built-in evaluation loop to store metrics for each batch.
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works with or without labels.
        """
        # Sample and save to game log if requested (for one batch, to save time)
        if self.generate_during_eval:
            # Generate random indices within the range of the total number of samples
            num_samples = len(dataloader.dataset)
            random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

            # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
            random_batch_dataset = dataloader.dataset.select(random_indices)
            random_batch = self.data_collator(random_batch_dataset)
            random_batch = self._prepare_inputs(random_batch)

            policy_output_decoded = self.generate_from_model(self.model, random_batch)

            table = pd.DataFrame(
                columns=["Prompt", "Policy"],
                data=[
                    [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded)
                ],
            )
            if "wandb" in self.args.report_to:
                wandb.log({"game_log": wandb.Table(data=table)})

            if "comet_ml" in self.args.report_to:
                log_table_to_comet_experiment(
                    name="game_log.csv",
                    table=table,
                )

        # Base evaluation
        initial_output = super().evaluation_loop(
            dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
        )

        return initial_output
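    # Sketch of the random-batch sampling in `evaluation_loop`, assuming the
    # dataloader wraps a `datasets.Dataset` (so `.select` exists): one eval batch
    # of indices is drawn without replacement before generation.
    #
    #     import random
    #     num_samples, eval_batch_size = 1000, 8       # illustrative sizes
    #     random_indices = random.sample(range(num_samples), k=eval_batch_size)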
    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
        """
        Log `logs` on the various objects watching training, including stored metrics.

        Args:
            logs (`dict[str, float]`):
                The values to log.
            start_time (`float` or `None`, *optional*, defaults to `None`):
                Start time of the training.
        """
        # `logs` contains either 'loss' (train) or 'eval_loss' (eval)
        train_eval = "train" if "loss" in logs else "eval"
        # Add averaged stored metrics to logs
        for key, metrics in self._stored_metrics[train_eval].items():
            logs[key] = torch.tensor(metrics).mean().item()
        del self._stored_metrics[train_eval]

        if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
            return super().log(logs, start_time)
        else:  # transformers<=4.46
            return super().log(logs)
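    # Sketch of the averaging in `log`: every per-batch value stored for a key is
    # collapsed to a single mean before being passed to the base logger.
    #
    #     import torch
    #     stored = {"rewards/margins": [0.1, 0.3, 0.2]}
    #     logs = {k: torch.tensor(v).mean().item() for k, v in stored.items()}  # ~0.2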
    def _shift_right(self, input_ids):
        if self.decoder_start_token_id is None:
            raise ValueError(
                "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = self.decoder_start_token_id

        if self.pad_token_id is None:
            raise ValueError("model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id)

        return shifted_input_ids
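    # Worked example of `_shift_right` with decoder_start_token_id = 0 and
    # pad_token_id = 1: labels [[5, -100, 6]] shift to [[0, 5, -100]], then the
    # leftover -100 is replaced by the pad id, giving [[0, 5, 1]].
    #
    #     import torch
    #     labels = torch.tensor([[5, -100, 6]])
    #     shifted = labels.new_zeros(labels.shape)
    #     shifted[..., 1:] = labels[..., :-1].clone()
    #     shifted[..., 0] = 0                          # decoder_start_token_id
    #     shifted.masked_fill_(shifted == -100, 1)     # pad_token_id
    #     # tensor([[0, 5, 1]])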
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        if not self.is_world_process_zero():
            return

        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        citation = textwrap.dedent("""\
        @article{hong2024orpo,
            title  = {{ORPO: Monolithic Preference Optimization without Reference Model}},
            author = {Jiwoo Hong and Noah Lee and James Thorne},
            year   = 2024,
            eprint = {arXiv:2403.07691}
        }""")

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="ORPO",
            trainer_citation=citation,
            paper_title="ORPO: Monolithic Preference Optimization without Reference Model",
            paper_id="2403.07691",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
class UnslothORPOTrainer(_UnslothORPOTrainer):
    """
    Initialize ORPOTrainer.

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        args (`ORPOConfig`):
            The ORPO config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator
            (`DPODataCollatorWithPadding`) will be used, which pads the sequences to the maximum length of the
            sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        model_init (`Callable[[], transformers.PreTrainedModel]`):
            The model initializer to use for training. If None is specified, the default model initializer will be used.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
        peft_config (`dict`, defaults to `None`):
            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return
            a dictionary mapping strings to metric values.
    """
    def __init__(
        self,
        model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        model_init = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        compute_metrics = None,
        **kwargs
    ):
        if args is None: args = UnslothORPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if float16 and use_bf16: raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not float16 and use_fp16: raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if not use_bf16 and not use_fp16:
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if not bf16_full_eval and not fp16_full_eval: args.bf16_full_eval = args.bf16; args.fp16_full_eval = args.fp16
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('orpo_trainer', other_metrics)

        super().__init__(
            model = model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            model_init = model_init,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            compute_metrics = compute_metrics,
            **kwargs,
        )
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass
pass
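# Hedged usage sketch (comments only, nothing runs at import time): a typical
# ORPO fine-tuning setup with this trainer. The model name is a placeholder and
# `preference_dataset` is assumed to carry `prompt`/`chosen`/`rejected` columns.
#
#     from unsloth import FastLanguageModel
#     model, tokenizer = FastLanguageModel.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
#     trainer = UnslothORPOTrainer(
#         model = model,
#         args = UnslothORPOConfig(output_dir = "outputs", beta = 0.1),
#         train_dataset = preference_dataset,
#         processing_class = tokenizer,
#     )
#     trainer.train()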