{"id":2444,"date":"2024-03-11T03:31:25","date_gmt":"2024-03-10T19:31:25","guid":{"rendered":"https:\/\/www.aqwu.net\/wp\/?p=2444"},"modified":"2024-04-28T20:02:50","modified_gmt":"2024-04-28T12:02:50","slug":"%e4%ba%86%e8%a7%a3-llama-2-%e6%a8%a1%e5%9e%8b%e7%bb%93%e6%9e%844","status":"publish","type":"post","link":"https:\/\/www.aqwu.net\/wp\/?p=2444","title":{"rendered":"\u4e86\u89e3 LLaMA-2 \u6a21\u578b\u7ed3\u6784(4)"},"content":{"rendered":"\n<h2 class=\"wp-block-heading\"><strong>8. \u4fdd\u5b58\u8f6c\u6362\u540e\u7684\u6a21\u578b<\/strong><\/h2>\n\n\n\n<p>\u8f6c\u6362\u6a21\u578b\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u4f7f\u7528c\/c++\u6765\u4f7f\u7528\uff0c\u524d\u9762\u7684\u6a21\u578b\u8f6c\u6362\u4e3a\u81ea\u5df1\u7684\u6a21\u578b\u540e\uff0c\u9700\u8981\u4fdd\u5b58\u4e0b\u6765\uff0c\uff0c\u53c2\u7167 https:\/\/github.com\/karpathy\/llama2.c \u9879\u76ee\u4e0b\u7684 export.py \u6587\u4ef6\uff0c\u547d\u540d\u4e3a test08.py\uff0c\u6587\u4ef6\u4fdd\u5b58\u5230 newsrc \u76ee\u5f55\u4e0b\uff1a<\/p>\n\n\n\n<div class=\"wp-block-urvanov-syntax-highlighter-code-block\"><pre class=\"lang:python decode:true \" >import os\nimport gzip\nimport shutil\nimport struct\nimport argparse\nimport json\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom transformers import AutoModelForCausalLM\nfrom model import ModelArgs, Transformer\n\ndef load_hf_model(model_path):\n\n    # load HF model\n    hf_model = AutoModelForCausalLM.from_pretrained(model_path)\n    hf_dict = hf_model.state_dict()\n\n    # convert LlamaConfig to ModelArgs\n    config = ModelArgs()\n    config.dim = hf_model.config.hidden_size\n    config.n_layers = hf_model.config.num_hidden_layers\n    config.n_heads = hf_model.config.num_attention_heads\n    config.n_kv_heads = hf_model.config.num_attention_heads\n    config.vocab_size = hf_model.config.vocab_size\n    config.hidden_dim = hf_model.config.intermediate_size\n    config.norm_eps = hf_model.config.rms_norm_eps\n    config.max_seq_len = hf_model.config.max_position_embeddings\n\n    # create a new Transformer object and set weights\n    model = Transformer(config)\n\n    model.tok_embeddings.weight = nn.Parameter(hf_dict['model.embed_tokens.weight'])\n    model.norm.weight = nn.Parameter(hf_dict['model.norm.weight'])\n\n    # huggingface permutes WQ and WK, this function reverses it\n    def permute_reverse(w, n_heads=config.n_heads, dim1=config.dim, dim2=config.dim):\n        return w.view(n_heads, 2, dim1 \/\/ n_heads \/\/ 2, dim2).transpose(1, 2).reshape(dim1, dim2)\n\n    for layer in model.layers:\n        i = layer.layer_id\n        layer.attention_norm.weight = nn.Parameter(hf_dict[f'model.layers.{i}.input_layernorm.weight'])\n        layer.attention.wq.weight = nn.Parameter(permute_reverse(hf_dict[f'model.layers.{i}.self_attn.q_proj.weight']))\n        layer.attention.wk.weight = nn.Parameter(permute_reverse(hf_dict[f'model.layers.{i}.self_attn.k_proj.weight']))\n        layer.attention.wv.weight = nn.Parameter(hf_dict[f'model.layers.{i}.self_attn.v_proj.weight'])\n        layer.attention.wo.weight = nn.Parameter(hf_dict[f'model.layers.{i}.self_attn.o_proj.weight'])\n        layer.ffn_norm.weight = nn.Parameter(hf_dict[f'model.layers.{i}.post_attention_layernorm.weight'])\n        layer.feed_forward.w1.weight = nn.Parameter(hf_dict[f'model.layers.{i}.mlp.gate_proj.weight'])\n        layer.feed_forward.w2.weight = nn.Parameter(hf_dict[f'model.layers.{i}.mlp.down_proj.weight'])\n        layer.feed_forward.w3.weight = nn.Parameter(hf_dict[f'model.layers.{i}.mlp.up_proj.weight'])\n\n    # final classifier\n    model.output.weight = nn.Parameter(hf_dict['lm_head.weight'])\n    model.eval()\n    return model\n\n# legacy\ndef serialize_fp32(file, tensor):\n    \"\"\" writes one fp32 tensor to file that is open in wb mode \"\"\"\n    d = tensor.detach().cpu().view(-1).to(torch.float32).numpy()\n    b = struct.pack(f'{len(d)}f', *d)\n    file.write(b)\n\ndef legacy_export(model, filepath):\n    \"\"\" Original export of llama2.c bin files, i.e. version v0 \"\"\"\n    out_file = open(filepath, 'wb')\n\n    # first write out the header\n    hidden_dim = model.layers[0].feed_forward.w1.weight.shape[0]\n    p = model.params\n    shared_classifier = torch.equal(model.tok_embeddings.weight, model.output.weight)\n    # legacy format uses negative\/positive vocab size as a shared classifier flag\n    if not shared_classifier:\n        p.vocab_size = -p.vocab_size\n    n_kv_heads = p.n_heads if p.n_kv_heads is None else p.n_kv_heads\n    header = struct.pack('iiiiiii', p.dim, hidden_dim, p.n_layers, p.n_heads,\n                                    n_kv_heads, p.vocab_size, p.max_seq_len)\n    out_file.write(header)\n\n    # next write out the embedding weights\n    serialize_fp32(out_file, model.tok_embeddings.weight)\n\n    # now all the layers\n    # attention weights\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.attention_norm.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.attention.wq.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.attention.wk.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.attention.wv.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.attention.wo.weight)\n    # ffn weights\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.ffn_norm.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.feed_forward.w1.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.feed_forward.w2.weight)\n    for layer in model.layers:\n        serialize_fp32(out_file, layer.feed_forward.w3.weight)\n    # final rmsnorm\n    serialize_fp32(out_file, model.norm.weight)\n    # freqs_cis\n    serialize_fp32(out_file, model.freqs_cos[:p.max_seq_len])\n    serialize_fp32(out_file, model.freqs_sin[:p.max_seq_len])\n\n    # final classifier weights\n    if not shared_classifier:\n        serialize_fp32(out_file, model.output.weight)\n\n    # write to binary file\n    out_file.close()\n    print(f\"wrote {filepath}\")\n\n# \u6307\u5b9a\u6a21\u578b\u8def\u5f84\nmodel_path = \"meta-llama\/Llama-2-7b-chat-hf\"\noutput_path = \"output\/model.bin\"\n\nmodel = load_hf_model(model_path)\nlegacy_export(model, output_path)\n<\/pre><\/div>\n\n\n\n<p>\u8fd0\u884c test08.py<\/p>\n\n\n\n<p>\u6700\u540e\u68c0\u67e5 output \u76ee\u5f55\u7684\u5185\u5bb9\u5982\u4e0b\uff1a<\/p>\n\n\n\n<div class=\"wp-block-urvanov-syntax-highlighter-code-block\"><pre class=\"lang:sh decode:true \" >python newsrc\/test08.py\nLoading checkpoint shards: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2\/2 [02:01&lt;00:00, 60.74s\/it]\nwrote output\/model.bin\n\nls -l output\/\ntotal 26323988\n-rwxrwxrwx 1 tony tony 26955759644 Mar 12 01:23 model.bin\n<\/pre><\/div>\n","protected":false},"excerpt":{"rendered":"<p>8. \u4fdd\u5b58\u8f6c\u6362\u540e\u7684\u6a21\u578b \u8f6c\u6362\u6a21\u578b\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u4f7f\u7528c\/c++\u6765\u4f7f\u7528\uff0c\u524d\u9762\u7684\u6a21\u578b\u8f6c\u6362\u4e3a\u81ea\u5df1\u7684\u6a21\u578b\u540e\uff0c\u9700\u8981\u4fdd\u5b58\u4e0b\u6765\uff0c [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"site-sidebar-layout":"default","site-content-layout":"","ast-site-content-layout":"default","site-content-style":"default","site-sidebar-style":"default","ast-global-header-display":"","ast-banner-title-visibility":"","ast-main-header-display":"","ast-hfb-above-header-display":"","ast-hfb-below-header-display":"","ast-hfb-mobile-header-display":"","site-post-title":"","ast-breadcrumbs-content":"","ast-featured-img":"","footer-sml-layout":"","theme-transparent-header-meta":"","adv-header-id-meta":"","stick-header-meta":"","header-above-stick-meta":"","header-main-stick-meta":"","header-below-stick-meta":"","astra-migrate-meta-layouts":"set","ast-page-background-enabled":"default","ast-page-background-meta":{"desktop":{"background-color":"var(--ast-global-color-4)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"tablet":{"background-color":"","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"mobile":{"background-color":"","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""}},"ast-content-background-meta":{"desktop":{"background-color":"var(--ast-global-color-5)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"tablet":{"background-color":"var(--ast-global-color-5)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"mobile":{"background-color":"var(--ast-global-color-5)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""}},"_jetpack_memberships_contains_paid_content":false,"footnotes":""},"categories":[313,289,443,442,312],"tags":[242,314],"class_list":["post-2444","post","type-post","status-publish","format-standard","hentry","category-chatgpt","category-gpt","category-llm","category-llms","category-openai","tag-chatgpt","tag-openai-api"],"views":1235,"jetpack_sharing_enabled":true,"jetpack_featured_media_url":"","_links":{"self":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/2444","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=2444"}],"version-history":[{"count":12,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/2444\/revisions"}],"predecessor-version":[{"id":2506,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/2444\/revisions\/2506"}],"wp:attachment":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=2444"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=2444"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=2444"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}