Redirects HF paper pages to arXiv.
Chrome: https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd
Firefox: https://addons.mozilla.org/en-US/firefox/addon/redirector/
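One way to set up the rule in Redirector (my own example; adjust the pattern if your HF paper URLs differ):
Include pattern: https://huggingface.co/papers/*
Redirect to: https://arxiv.org/abs/$1
Pattern type: Wildcard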
import torch.nn as nn
import copy
import torch
from torch.nn.attention.flex_attention import flex_attention, create_block_mask, or_masks, create_mask
from triton.testing import do_bench
from functools import partial

torch.set_default_device('cuda')
B = 4
import torch
import os
import json
from safetensors.torch import load_file, save_file

def replicate_lora_a(name: str, weight: "torch.Tensor") -> dict[str, "torch.Tensor"]:
    # The LoRA A matrix acts on the layer input, so a fused qkv_proj A matrix can be
    # replicated unchanged for each of the separate q/k/v projections.
    prefix, suffix = name.split('qkv_proj')
    res = {}
    for t in ['q_proj', 'k_proj', 'v_proj']:
        new_name = f"{prefix}{t}{suffix}"
        res[new_name] = weight.clone()
    return res
import torch
torch.set_default_device('cuda')
from triton.testing import do_bench
from collections import defaultdict
from functools import partial
import random
random.seed(0)

def get_flops(A, B):
    # do_bench returns the median runtime in milliseconds
    ms = do_bench(lambda: torch.mm(A, B))
    # an (M, K) @ (K, N) matmul does 2*M*K*N FLOPs; report achieved FLOP/s
    flops = 2 * A.shape[0] * A.shape[1] * B.shape[1]
    return flops / (ms * 1e-3)
Yoav Goldberg, April 2023.
With the release of the ChatGPT model and follow-up large language models (LLMs), there was a lot of discussion of the importance of "RLHF training", that is, "reinforcement learning from human feedback". I was puzzled for a while as to why RL (Reinforcement Learning) is better than learning from demonstrations (a.k.a. supervised learning) for training language models. Shouldn't learning from demonstrations (or, in language model terminology, "instruction fine-tuning", learning to imitate human-written answers) be sufficient? I came up with a theoretical argument that was somewhat convincing. But I came to realize there is an additional argument which not only supports the case of RL training, but also requires it, in particular for models like ChatGPT. This additional argument is spelled out in (the first half of) a talk by John Schulman from OpenAI. This post pretty much
#!/usr/bin/env python
import argparse
import torch
from transformers import GPTJForCausalLM, GPTJConfig
# Note: these need the git version of Transformers as of 7/22/2022
from transformers import CodeGenTokenizer, CodeGenForCausalLM
from transformers import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST

parser = argparse.ArgumentParser(description='Convert SalesForce CodeGen model to GPT-J')
# http://editorconfig.org/#file-format-details
root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
# This isn't supposed to run as a bash script; I named it with ".sh" for syntax highlighting.
# https://developer.nvidia.com/nsight-systems
# https://docs.nvidia.com/nsight-systems/profiling/index.html
# My preferred nsys (command line executable used to create profiles) commands
#
# In your script, write
#   torch.cuda.nvtx.range_push("region name")
#   ...
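# A minimal usage sketch (my addition; "model", "batch", "train.py" and the output name are placeholders):
#
#   torch.cuda.nvtx.range_push("forward")
#   out = model(batch)
#   torch.cuda.nvtx.range_pop()
#
#   torch.cuda.nvtx.range_push("backward")
#   out.sum().backward()
#   torch.cuda.nvtx.range_pop()
#
# Then create a profile with something like:
#   nsys profile -t cuda,nvtx -o my_profile python train.py
# and the pushed/popped regions show up as named NVTX ranges on the timeline.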
import math
import torch
import torch.nn as nn
from torch.nn import functional as F

class RelativePositionBias(nn.Module):
    def __init__(self, bidirectional=True, num_buckets=32, max_distance=128, n_heads=2):
        super(RelativePositionBias, self).__init__()
        self.bidirectional = bidirectional
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.n_heads = n_heads
        # learned bias per (relative-position bucket, attention head), T5-style
        self.relative_attention_bias = nn.Embedding(num_buckets, n_heads)