```php
<?php
    // (truncated: tail of the preceding format function; '$post['filelist']'
    // is reconstructed from the identical ending of comment_format() below)
        $post['filelist'] = array();
    }
    $post['classname'] = 'post';
}

function comment_format(&$post) {
    global $conf, $uid, $gid, $forumlist;
    if (empty($post)) return;
    $forum = $post['fid'] ? forum_read($post['fid']) : '';
    $thread = well_thread_read_cache($post['tid']);
    if ($thread) {
        //$post['fid'] = $thread['fid'];
        $post['closed'] = $thread['closed'];
        $post['subject'] = $thread['subject'];
        $post['url'] = $thread['url'];
    } else {
        $post['closed'] = 0;
        $post['subject'] = lang('thread_not_exists');
        $post['url'] = '';
    }
    $post['create_date_fmt'] = humandate($post['create_date']);
    //$post['message'] = stripslashes(htmlspecialchars_decode($post['message']));
    $user = user_read_cache($post['uid']);
    $post['username'] = array_value($user, 'username');
    $post['user_avatar_url'] = array_value($user, 'avatar_url');
    $post['user'] = $user ? user_safe_info($user) : user_guest();
    isset($post['floor']) || $post['floor'] = 0;

    // Permission checks
    $post['allowupdate'] = 2 == array_value($forum, 'comment', 0) && ($uid == $post['uid'] || forum_access_mod($post['fid'], $gid, 'allowupdate'));
    $post['allowdelete'] = group_access($gid, 'allowuserdelete') && $uid == $post['uid'] || forum_access_mod($post['fid'], $gid, 'allowdelete');
    $post['user_url'] = url('user-' . $post['uid'] . ($post['uid'] ? '' : '-' . $post['pid']));
    if ($post['files'] > 0) {
        list($attachlist, $imagelist, $filelist) = well_attach_find_by_pid($post['pid']);
        // Image host: looking attachments up in MySQL for every comment is too
        // heavy; hard-coding the hosted URL into the content relieves that load.
        if (2 == $conf['attach_on']) {
            foreach ($imagelist as $key => $attach) {
                $url = $conf['upload_url'] . 'website_attach/' . $attach['filename'];
                // Replace with the image-host URL
                $post['message'] = FALSE !== strpos($post['message'], $url) && $attach['image_url'] ? str_replace($url, $attach['image_url'], $post['message']) : $post['message'];
            }
        }
        $post['filelist'] = $filelist;
    } else {
        $post['filelist'] = array();
    }
    $post['classname'] = 'post';
}

function comment_format_message(&$val) {
    global $conf;
    if (empty($val)) return;
    // Cloud storage in use
    if (1 == $conf['attach_on'] && 1 == $val['attach_on']) {
        $val['message'] = str_replace('="upload/', '="' . file_path($val['attach_on']), $val['message']);
    } elseif (2 == $conf['attach_on'] && 2 == $val['attach_on']) {
        // Image host in use
        list($attachlist, $imagelist, $filelist) = well_attach_find_by_tid($val['tid']);
        foreach ($imagelist as $key => $attach) {
            $url = $conf['upload_url'] . 'website_attach/' . $attach['filename'];
            // Replace with the image-host URL
            $val['message'] = FALSE !== strpos($val['message'], $url) && $attach['image_url'] ? str_replace($url, $attach['image_url'], $val['message']) : $val['message'];
        }
    } else {
        $val['message'] = str_replace('="upload/', '="' . file_path($val['attach_on']), $val['message']);
    }
    //$val['message'] = stripslashes(htmlspecialchars_decode($val['message']));
}

// Replace attachment URLs in the content that point at cloud storage
function comment_message_replace_url($pid, $message) {
    global $conf;
    if (0 == $conf['attach_on']) {
        $message = FALSE !== strpos($message, '="../upload/') ? str_replace('="../upload/', '="upload/', $message) : $message;
        $message = FALSE !== strpos($message, '="/upload/') ? str_replace('="/upload/', '="upload/', $message) : $message;
    } elseif (1 == $conf['attach_on']) {
        // Cloud storage in use
        $message = str_replace('="' . $conf['cloud_url'] . 'upload/', '="upload/', $message);
    } elseif (2 == $conf['attach_on']) {
        // Image host in use (see the note in comment_format() above)
        list($attachlist, $imagelist, $filelist) = well_attach_find_by_pid($pid);
        foreach ($imagelist as $key => $attach) {
            $url = $conf['upload_url'] . 'website_attach/' . $attach['filename'];
            // Replace back with the relative URL
            $message = $attach['image_url'] && FALSE !== strpos($message, $attach['image_url']) ? str_replace($attach['image_url'], $url, $message) : $message;
        }
    }
    return $message;
}

function comment_filter($val) {
    unset($val['userip']);
    return $val;
}

function comment_highlight_keyword($str, $k) {
    // NOTE: the wrapping tags were stripped by the page scraper; <em> is a guess.
    $r = str_ireplace($k, '<em>' . $k . '</em>', $str);
    return $r;
}

function comment_message_format(&$s) {
    if (xn_strlen($s) < 100) return;
    // NOTE: the HTML tags below were eaten by the page scraper and are
    // reconstructed from the usual Xiuno BBS implementation.
    $s = preg_replace('#<blockquote>.*?</blockquote>#is', '', $s);
    $s = str_ireplace(array('<br>', '<br/>', '<br />', '</p><p>', '<p>', '</p>', '<div>', '</div>' . '<div>'), "\r\n", $s);
    $s = str_ireplace(array('&nbsp;'), ' ', $s);
    $s = strip_tags($s);
    $s = preg_replace('#[\r\n]+#', "\n", $s);
    $s = xn_substr(trim($s), 0, 100);
    $s = str_replace("\n", '<br>', $s);
}

// Quote another comment's content
function comment_quote($quotepid) {
    $quotepost = comment_read($quotepid);
    if (empty($quotepost)) return '';
    $uid = $quotepost['uid'];
    $s = $quotepost['message'];
    $s = comment_brief($s, 100);
    $userhref = url('user-' . $uid);
    $user = user_read_cache($uid);
    // NOTE: the markup was stripped by the scraper; a <blockquote> wrapper is assumed.
    $r = '<blockquote><a href="' . $userhref . '">' . $user['username'] . '</a> ' . $s . '</blockquote>';
    return $r;
}

// Get a brief of the content. 0: html, 1: txt, 2: markdown, 3: ubb
function comment_brief($s, $len = 100) {
    $s = strip_tags($s);
    $s = htmlspecialchars($s);
    $more = xn_strlen($s) > $len ? ' ... ' : '';
    $s = xn_substr($s, 0, $len) . $more;
    return $s;
}
?>
```
In-Context Learning has recently become a popular research direction. It mainly targets very large models (e.g., the 175B-parameter GPT-3): given only a handful of labeled examples as a prompt, such models can achieve impressive results. This paper brings meta-learning into In-Context Learning.
Paper PDF: https://doi.org/10.18653/v1/2022.acl-long.53

1. Motivation

This paper proposes in-context tuning (ICT) for few-shot learning.

  • Prompting a language model can already be very successful in the few-shot setting, e.g., GPT-3 (a minimal scoring sketch follows this list):

For example, to coax the model into performing sentiment classification on the target input “This movie is a waste of time”, we prompt the LM with the sequence “I like the movie! Positive review? Yes. Horrible Movie! Positive review? No. This movie is a waste of time. Positive review? ___”, and predict “positive” if the next word is more likely to be “Yes” rather than “No”.

  • However, raw language models are not optimized for in-context learning during pre-training:

raw LMs are not optimized for in-context FSL during pre-training, and exhibit undesirable behavior when used for FSL

  • Prior work has found that prompting is oversensitive to the choice of examples and to the wording of the instruction itself:

Previous work has also shown that prompting raw LMs is often oversensitive to example choices and instruction wording
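
To make the scoring procedure above concrete, here is a minimal sketch using GPT-2 through Hugging Face transformers as a stand-in for GPT-3; the model choice and exact prompt string are illustrative assumptions, not the paper's setup.

```python
# Raw in-context prompting with Yes/No scoring (sketch).
# GPT-2 stands in for GPT-3; no parameters are updated.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

prompt = (
    "I like the movie! Positive review? Yes. "
    "Horrible Movie! Positive review? No. "
    "This movie is a waste of time. Positive review?"
)

with torch.no_grad():
    inputs = tokenizer(prompt, return_tensors="pt")
    next_token_logits = model(**inputs).logits[0, -1]

# Predict "positive" iff " Yes" is a more likely next token than " No".
yes_id = tokenizer.encode(" Yes")[0]
no_id = tokenizer.encode(" No")[0]
print("positive" if next_token_logits[yes_id] > next_token_logits[no_id] else "negative")
```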

2. Method

In the training (fine-tuning) stage, we are given a collection of training tasks, each with its own instruction and a small set of labeled examples (input/output pairs). At test time, given a new, unseen task together with its instruction and a few labeled examples, the model must predict the label of each test input. For example, consider a sentiment-analysis task:

At training time, the instruction $I_T$, the few labeled examples $S_T$, and the target input $x_{\text{target}}^T$ are simply concatenated, and the model is optimized with the in-context learning objective to predict the corresponding label $y_{\text{target}}^T$, i.e. to maximize $\log p_\theta\left(y_{\text{target}}^T \mid I_T, S_T, x_{\text{target}}^T\right)$.
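
A minimal sketch of one such in-context tuning update follows; the serialization template, task data, and hyperparameters are my own assumptions rather than the paper's exact implementation.

```python
# One in-context tuning (ICT) update on a meta-training task (sketch).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

def ict_loss(instruction, support_pairs, x_target, y_target):
    # Concatenate instruction I_T, few-shot examples S_T, and the target input,
    # then train the LM to emit the target label y_target.
    context = instruction + " "
    for x, y in support_pairs:
        context += f"{x} Positive review? {y}. "
    context += f"{x_target} Positive review?"

    ctx_ids = tokenizer(context, return_tensors="pt").input_ids
    tgt_ids = tokenizer(" " + y_target, return_tensors="pt").input_ids
    input_ids = torch.cat([ctx_ids, tgt_ids], dim=1)

    # The loss is computed on the label tokens only; the context is masked out.
    labels = input_ids.clone()
    labels[:, : ctx_ids.shape[1]] = -100
    return model(input_ids=input_ids, labels=labels).loss

loss = ict_loss(
    instruction="Decide whether a movie review is positive.",
    support_pairs=[("I like the movie!", "Yes"), ("Horrible Movie!", "No")],
    x_target="This movie is a waste of time.",
    y_target="No",
)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```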

3. Experiments

(1) Baseline models
Raw In-context Learning

As with in-context tuning, the model receives an unseen task's instruction, a few labeled examples (input/output pairs), and the test input, and predicts the output directly. There is no fine-tuning stage; the raw pre-trained LM is used without any parameter updates.

Instruction-tuning + Fine-tuning

The model is first trained on a set of tasks using each task's instruction and the task inputs. Then, in the fine-tuning stage, it is further fine-tuned on the unseen task's instruction together with its K labeled examples.

Instruction-tuning

Here only the instruction and the task input are available, so this is a zero-shot learning setting.

MAML

The model is trained on each task's instruction and a single task input, and predicts the target. It is identical to standard MAML except that the training objective becomes instruction tuning. (A sketch contrasting what each method conditions on follows.)
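
To make the comparison explicit, here is a small sketch of the sequence each method conditions on at prediction time; the templates are illustrative assumptions, not the paper's exact formats.

```python
# What each baseline conditions on at test time (sketch).
def serialize(instruction, support_pairs, x):
    parts = [instruction]
    for xs, ys in support_pairs:  # empty list for the zero-shot methods
        parts.append(f"Input: {xs} Output: {ys}")
    parts.append(f"Input: {x} Output:")
    return " ".join(parts)

instruction = "Decide whether a movie review is positive."
support = [("I like the movie!", "Yes"), ("Horrible Movie!", "No")]
x = "This movie is a waste of time."

# Raw in-context learning / in-context tuning: instruction + K examples + input.
print(serialize(instruction, support, x))

# Instruction tuning (and MAML's per-example input): instruction + input only.
# MAML additionally adapts the weights on the K examples before predicting.
print(serialize(instruction, [], x))
```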

  • In-context tuning beats raw in-context learning, showing that directly training on the in-context learning objective is effective;
  • MAML outperforms instruction tuning, showing that MAML can fully exploit the few-shot examples for task adaptation; and the proposed method in turn outperforms MAML, suggesting that in-context tuning makes better use of the pre-trained language model's inductive bias.