{"id":2114,"date":"2024-02-20T16:51:51","date_gmt":"2024-02-20T08:51:51","guid":{"rendered":"https:\/\/www.aqwu.net\/wp\/?p=2114"},"modified":"2024-04-28T20:04:30","modified_gmt":"2024-04-28T12:04:30","slug":"%e5%a6%82%e4%bd%95%e6%9e%84%e5%bb%ba%e4%b8%80%e4%b8%aa%e5%9f%ba%e6%9c%ac%e7%9a%84transformer%e6%a8%a1%e5%9e%8b%e8%8b%b1%e4%b8%ad%e7%bf%bb%e8%af%91%ef%bc%89","status":"publish","type":"post","link":"https:\/\/www.aqwu.net\/wp\/?p=2114","title":{"rendered":"\u5982\u4f55\u6784\u5efa\u4e00\u4e2a\u57fa\u672c\u7684Transformer\u6a21\u578b(\u82f1\u4e2d\u7ffb\u8bd1\uff09"},"content":{"rendered":"\n<p>\u4e00\u4e2a\u7b80\u5316\u7248\u7684Transformer\u6a21\u578b\u8bad\u7ec3\u7a0b\u5e8f\u793a\u4f8b\uff0c\u4f7f\u7528PyTorch\u6846\u67b6\u3002\u8fd9\u4e2a\u793a\u4f8b\u5c55\u793a\u4e86\u5982\u4f55\u6784\u5efa\u4e00\u4e2a\u57fa\u672c\u7684Transformer\u6a21\u578b\uff0c\u7528\u4e8e\u4e00\u4e2a\u7b80\u5355\u7684\u5e8f\u5217\u5230\u5e8f\u5217\u7684\u4efb\u52a1\uff08\u4f8b\u5982\uff0c\u673a\u5668\u7ffb\u8bd1\u6216\u6587\u672c\u751f\u6210\uff09\u3002\u6ce8\u610f\uff0c\u8fd9\u4e2a\u4f8b\u5b50\u662f\u4e3a\u4e86\u6f14\u793a\u76ee\u7684\u800c\u7b80\u5316\u7684\uff0c\u5b9e\u9645\u5e94\u7528\u4e2d\u53ef\u80fd\u9700\u8981\u66f4\u590d\u6742\u7684\u6570\u636e\u5904\u7406\u3001\u6a21\u578b\u67b6\u6784\u8c03\u6574\u548c\u8bad\u7ec3\u7b56\u7565\u3002<\/p>\n\n\n\n<h2 class=\"wp-block-heading\"><strong>0. \u51c6\u5907\u73af\u5883<\/strong><\/h2>\n\n\n\n<p>\u786e\u4fdd\u5b89\u88c5\u4e86PyTorch\u3002\u53ef\u4ee5\u901a\u8fc7<code>pip install torch<\/code>\u5b89\u88c5\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code has-small-font-size\"><code><code>pip install torch<\/code><\/code><\/pre>\n\n\n\n<ol class=\"wp-block-list\">\n<li><\/li>\n<\/ol>\n\n\n\n<h2 class=\"wp-block-heading\"><strong>1. \u51c6\u5907\u8bad\u7ec3\u6837\u672c<\/strong><\/h2>\n\n\n\n<p>\u6211\u4eec\u5c06\u5b9a\u4e49\u4e00\u5c0f\u6279\u7b80\u5355\u7684\u82f1\u6587\u53e5\u5b50\u53ca\u5176\u5bf9\u5e94\u7684\u4e2d\u6587\u7ffb\u8bd1\u3002\u8fd9\u91cc\u4f7f\u7528\u7684\u662f\u6781\u5176\u7b80\u5316\u7684\u6570\u636e\u96c6\uff0c\u4ec5\u7528\u4e8e\u6f14\u793a\u76ee\u7684\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code has-small-font-size\"><code># \u82f1\u6587\u5230\u4e2d\u6587\u7684\u7b80\u5355\u53e5\u5b50\u5bf9\nenglish_sentences = &#91;\n    \"Hello, how are you?\",  # \u4f60\u597d\uff0c\u4f60\u600e\u4e48\u6837\uff1f\n    \"I am learning translation.\",  # \u6211\u5728\u5b66\u4e60\u7ffb\u8bd1\u3002\n    \"This is a pen.\",  # \u8fd9\u662f\u4e00\u652f\u7b14\u3002\n    \"What is your name?\",  # \u4f60\u53eb\u4ec0\u4e48\u540d\u5b57\uff1f\n    \"I love programming.\"  # \u6211\u7231\u7f16\u7a0b\u3002\n]\n\nchinese_sentences = &#91;\n    \"\u4f60\u597d\uff0c\u4f60\u600e\u4e48\u6837\uff1f\",\n    \"\u6211\u5728\u5b66\u4e60\u7ffb\u8bd1\u3002\",\n    \"\u8fd9\u662f\u4e00\u652f\u7b14\u3002\",\n    \"\u4f60\u53eb\u4ec0\u4e48\u540d\u5b57\uff1f\",\n    \"\u6211\u7231\u7f16\u7a0b\u3002\"\n]\n\n# \u5047\u8bbe\u6211\u4eec\u5df2\u7ecf\u6709\u4e86\u82f1\u6587\u548c\u4e2d\u6587\u7684\u8bcd\u6c47\u8868\u548c\u7f16\u7801\u51fd\u6570\uff08\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\uff0c\u9700\u8981\u6839\u636e\u5b9e\u9645\u6570\u636e\u6784\u5efa\uff09\n# \u4e3a\u4e86\u7b80\u5316\uff0c\u6211\u4eec\u8fd9\u91cc\u4e0d\u5b9e\u73b0\u8fd9\u4e00\u90e8\u5206\uff0c\u800c\u662f\u76f4\u63a5\u4f7f\u7528\u82f1\u6587\u548c\u4e2d\u6587\u53e5\u5b50\u7684\u7d22\u5f15\u8868\u793a\n\n<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\"><strong>2. 
## 2. Training Code

```python
import torch
import torch.nn as nn
import torch.optim as optim
import math
import os
import jieba
import pickle

class TransformerModel(nn.Module):
    """Encoder-only Transformer: embeds source tokens, adds positional
    encoding, runs the encoder stack, and projects every position to
    logits over the target vocabulary."""
    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = nn.TransformerEncoderLayer(d_model=ninp, nhead=nhead, dim_feedforward=nhid, dropout=dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def init_weights(self):
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src):
        src = self.encoder(src) * math.sqrt(self.ninp)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        output = self.decoder(output)
        return output

class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding:
    PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
    PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
    """
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

# Simple English-to-Chinese sentence pairs
english_sentences = [
    "Hello, how are you?",  # 你好，你怎么样？
    "I am learning translation.",  # 我在学习翻译。
    "This is a pen.",  # 这是一支笔。
    "What is your name?",  # 你叫什么名字？
    "I love programming."  # 我爱编程。
]

chinese_sentences = [
    "你好，你怎么样？",
    "我在学习翻译。",
    "这是一支笔。",
    "你叫什么名字？",
    "我爱编程。"
]

# Define the end-of-sequence token
end_token = '<eos>'

# Build the English vocabulary (whitespace tokenization)
def build_english_vocab(sentences):
    vocab = set(word for sentence in sentences for word in sentence.split())
    # Add the end token to the vocabulary
    vocab.add(end_token)
    return {word: i for i, word in enumerate(vocab)}

# Build the Chinese vocabulary, using jieba for word segmentation
def build_chinese_vocab(sentences):
    vocab = set(word for sentence in sentences for word in jieba.cut(sentence))
    # Add the end token to the vocabulary
    vocab.add(end_token)
    return {word: i for i, word in enumerate(vocab)}

def encode_english(sentence, vocab, max_len):
    words = sentence.split()[:max_len - 1]  # reserve one slot for the end token
    words.append(end_token)  # append the end token
    return [vocab.get(word, vocab['<unk>']) for word in words]

def encode_chinese(sentence, vocab, max_len):
    words = list(jieba.cut(sentence))[:max_len - 1]  # reserve one slot for the end token
    words.append(end_token)  # append the end token
    return [vocab.get(word, vocab['<unk>']) for word in words]

# Hyperparameters
ntokens = 1000  # vocabulary size (placeholder; reassigned below)
emsize = 200  # embedding dimension
nhid = 200  # dimension of the feed-forward network
nlayers = 2  # number of Transformer layers
nhead = 2  # number of attention heads
dropout = 0.2  # dropout rate

english_vocab = build_english_vocab(english_sentences)
chinese_vocab = build_chinese_vocab(chinese_sentences)

# Add <unk> to both vocabularies for out-of-vocabulary words
english_vocab['<unk>'] = len(english_vocab)
chinese_vocab['<unk>'] = len(chinese_vocab)

# Define the maximum sequence length
max_seq_length = max(len(sentence.split()) for sentence in english_sentences)

# Encode the English and Chinese sentences
encoded_english_sentences = [encode_english(sentence, english_vocab, max_seq_length) for sentence in english_sentences]
encoded_chinese_sentences = [encode_chinese(sentence, chinese_vocab, max_seq_length) for sentence in chinese_sentences]

# Vocabulary sizes
english_vocab_size = len(english_vocab)
chinese_vocab_size = len(chinese_vocab)

print(f"english_vocab_size {english_vocab_size}")
print(f"chinese_vocab_size {chinese_vocab_size}")

ntokens = chinese_vocab_size  # use the Chinese vocabulary size as ntokens

model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)

# Training setup
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)


# Note: in a real application you would adjust the model's input/output
# dimensions and other parameters to the task at hand.

epochs = 2000

for epoch in range(epochs):
    total_loss = 0
    for eng, chi in zip(encoded_english_sentences, encoded_chinese_sentences):
        model.train()
        optimizer.zero_grad()

        # Shape the input as [batch_size, seq_len] to match batch_first=True
        src = torch.tensor([eng], dtype=torch.long)  # the extra dimension is the batch
        tgt = torch.tensor([chi], dtype=torch.long)

        output = model(src)

        # Reshape output and target to fit the cross-entropy loss
        output_reshaped = output.view(-1, ntokens)
        tgt_reshaped = tgt.view(-1)

        loss = criterion(output_reshaped, tgt_reshaped)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    if epoch % 100 == 0:
        print(f"Epoch {epoch+1}, Loss: {total_loss / len(encoded_english_sentences)}")
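
# NOTE: the reshaped cross-entropy above relies on each encoded English
# sentence and its Chinese target having the same length. With this toy data
# every pair happens to encode to exactly max_seq_length tokens; real data
# would need explicit padding or truncation to a common length.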

# Save the model's state dict (create the models/ directory if needed)
os.makedirs("models", exist_ok=True)
model_path = f"models/gpt_model_1_{epochs}.pth"
torch.save(model.state_dict(), model_path)
print(f"Model saved to {model_path}")

# Save ntokens
print(f"ntokens {ntokens}")
with open(f"models/ntokens_1_{epochs}.pkl", "wb") as f:
    pickle.dump(ntokens, f)

# Save max_seq_length
print(f"max_seq_length {max_seq_length}")
with open(f"models/max_seq_length_1_{epochs}.pkl", "wb") as f:
    pickle.dump(max_seq_length, f)

# Save the English vocabulary
print(f"english_vocab {english_vocab}")
with open(f"models/english_vocab_1_{epochs}.pkl", "wb") as f:
    pickle.dump(english_vocab, f)

# Save the Chinese vocabulary
print(f"chinese_vocab {chinese_vocab}")
with open(f"models/chinese_vocab_1_{epochs}.pkl", "wb") as f:
    pickle.dump(chinese_vocab, f)


# Print the number of model parameters
print(sum(p.numel() for p in model.parameters())/1e6, 'M parameters')
```
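Before moving on to inference, it is worth pinning down what the model actually computes: for a `[batch, seq_len]` tensor of source token indices it returns `[batch, seq_len, ntokens]` logits over the Chinese vocabulary, one prediction per source position. A minimal sanity check, assuming the training script above has just run (the concrete sizes match the printed results at the end of this post):

```python
# Shape check (uses model, encoded_english_sentences and ntokens from above).
sample = torch.tensor([encoded_english_sentences[0]], dtype=torch.long)
with torch.no_grad():
    logits = model(sample)
print(sample.shape)  # e.g. torch.Size([1, 4])     -- max_seq_length is 4 here
print(logits.shape)  # e.g. torch.Size([1, 4, 20]) -- ntokens is 20 here
```

This is also why the training loop flattens `output` to `[-1, ntokens]` and the target to `[-1]` before applying `CrossEntropyLoss`.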
## 3. Loading the Model and Generating Text

```python
import torch
import torch.nn as nn
import math
import pickle

class TransformerModel(nn.Module):
    """Same model definition as in the training script."""
    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = nn.TransformerEncoderLayer(d_model=ninp, nhead=nhead, dim_feedforward=nhid, dropout=dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def init_weights(self):
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src):
        src = self.encoder(src) * math.sqrt(self.ninp)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        output = self.decoder(output)
        return output

class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding, as in the training script."""
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

end_token = '<eos>'

def encode_english(sentence, vocab, max_len):
    words = sentence.split()[:max_len - 1]  # reserve one slot for the end token
    words.append(end_token)  # append the end token
    return [vocab.get(word, vocab['<unk>']) for word in words]

def decode_chinese(indices, vocab):
    words = [list(vocab.keys())[list(vocab.values()).index(idx)] for idx in indices]
    # Drop everything from the end token onward
    if end_token in words:
        words = words[:words.index(end_token)]
    return ''.join(words)

# Hyperparameters (must match training)
ntokens = 1000  # placeholder; the actual value is loaded below
emsize = 200  # embedding dimension
nhid = 200  # dimension of the feed-forward network
nlayers = 2  # number of Transformer layers
nhead = 2  # number of attention heads
dropout = 0.2  # dropout rate

epochs = 2000

# Load ntokens
with open(f"models/ntokens_1_{epochs}.pkl", "rb") as f:
    ntokens = pickle.load(f)
print(f"ntokens {ntokens}")

# Load max_seq_length
with open(f"models/max_seq_length_1_{epochs}.pkl", "rb") as f:
    max_seq_length = pickle.load(f)
print(f"max_seq_length {max_seq_length}")

# Load the English vocabulary
with open(f"models/english_vocab_1_{epochs}.pkl", "rb") as f:
    english_vocab = pickle.load(f)
print(f"english_vocab {english_vocab}")

# Load the Chinese vocabulary
with open(f"models/chinese_vocab_1_{epochs}.pkl", "rb") as f:
    chinese_vocab = pickle.load(f)
print(f"chinese_vocab {chinese_vocab}")

model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)

model_path = f"models/gpt_model_1_{epochs}.pth"
model.load_state_dict(torch.load(model_path))
model.eval()


# Text generation function
def generate_text(model, start_text, max_len=50):
    with torch.no_grad():
        tokenized_start_text = encode_english(start_text, english_vocab, max_seq_length)
        input_seq = torch.tensor([tokenized_start_text], dtype=torch.long)
        for _ in range(max_len):
            output = model(input_seq)
            predicted_token = torch.argmax(output[:, -1, :], dim=-1)  # predicted token at the last time step
            input_seq = torch.cat([input_seq, predicted_token.unsqueeze(0)], dim=1)  # append the prediction to the input sequence
            if predicted_token.item() == chinese_vocab[end_token]:  # stop once the end token is generated
                break
        generated_text = decode_chinese(input_seq.squeeze().tolist(), chinese_vocab)
        return generated_text

# Use the generation function
start_text = "Hello, how are you?"  # initial text
generated_text = generate_text(model, start_text)
print("Generated text:", generated_text)
```
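One aside on the helpers above: `decode_chinese` performs a linear scan of the vocabulary (`list(vocab.values()).index(idx)`) for every index, which is fine for this toy vocabulary but wasteful for a real one. A variant that inverts the dictionary once (a sketch of mine, not part of the original script):

```python
def decode_chinese_fast(indices, vocab):
    # Invert the word -> index mapping once, so each lookup is O(1).
    inv_vocab = {i: w for w, i in vocab.items()}
    words = [inv_vocab[idx] for idx in indices]
    # Drop everything from the end token onward, as before
    if end_token in words:
        words = words[:words.index(end_token)]
    return ''.join(words)
```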
Running it produces:

```
ntokens 20
max_seq_length 4
english_vocab {'how': 0, 'learning': 1, 'are': 2, 'am': 3, 'Hello,': 4, 'pen.': 5, 'a': 6, 'This': 7, '<eos>': 8, 'your': 9, 'you?': 10, 'translation.': 11, 'I': 12, 'is': 13, 'programming.': 14, 'What': 15, 'name?': 16, 'love': 17, '<unk>': 18}
chinese_vocab {'，': 0, '<eos>': 1, '在': 2, '名字': 3, '这是': 4, '我': 5, '笔': 6, '翻译': 7, '。': 8, '学习': 9, '编程': 10, '你好': 11, '怎么样': 12, '叫': 13, '一支': 14, '爱': 15, '你': 16, '什么': 17, ' ？': 18, '<unk>': 19}
Generated text: 这是，在。
```

As the last line shows, the generated text is not a correct translation of "Hello, how are you?". That is expected: this is an encoder-only Transformer trained on just five sentence pairs, with no causal masking or real encoder-decoder architecture, and the generation loop feeds both English and Chinese token ids through the same embedding table (which avoids an index error here only because the toy English vocabulary is no larger than the Chinese one). The example demonstrates the mechanics of the training and inference pipeline rather than usable translation quality, which is exactly the simplification flagged at the start.