From ef20bcabaa4e81795915fd5bf2854ed5bf1d7e17 Mon Sep 17 00:00:00 2001
From: edencfc <39668828+edencfc@users.noreply.github.com>
Date: Thu, 13 May 2021 17:51:41 +0800
Subject: [PATCH 1/3] Add Malicious Webpage Detection Example

Add Malicious Webpage Detection Example by PaddleNLP
---
 .../malicious_webpage_detection.ipynb | 820 ++++++++++++++++++
 1 file changed, 820 insertions(+)
 create mode 100644 paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb

diff --git a/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb
new file mode 100644
index 00000000..44e9548c
--- /dev/null
+++ b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb
@@ -0,0 +1,820 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "# Malicious Webpage Detection with LSTM\n",
+ "\n",
+ "**Author:** [PaddlePaddle](https://github.com/PaddlePaddle)<br>\n",
\n", + "**日期:** 2021.05
\n", + "**摘要:** 本示例教程介绍如何使用飞桨完成一个恶意网页分类任务。通过使用飞桨搭建LSTM网络,组件一个网页内容分类的模型,并在示例的数据集上完成恶意网页的识别。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "## 一、环境配置\n", + "\n", + "本教程基于Paddle 2.0 编写,如果你的环境不是本版本,请先参考官网[安装](https://www.paddlepaddle.org.cn/install/quick) Paddle 2.0 。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "!pip install lxml -i https://mirror.baidu.com/pypi/simple/\r\n", + "!pip install bs4 -i https://mirror.baidu.com/pypi/simple/\r\n", + "!pip install html5lib -i https://mirror.baidu.com/pypi/simple/" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.2\n" + ] + } + ], + "source": [ + "import os\n", + "import sys\n", + "import codecs\n", + "import chardet\n", + "import shutil\n", + "import time\n", + "import numpy as np\n", + "import pandas as pd\n", + "import jieba\n", + "from tqdm import tqdm, trange\n", + "from bs4 import BeautifulSoup\n", + "from functools import partial\n", + "import paddle\n", + "import paddlenlp\n", + "import paddle.nn as nn\n", + "import paddle.nn.functional as F\n", + "import paddlenlp as ppnlp\n", + "from paddlenlp.data import Pad, Stack, Tuple\n", + "from paddlenlp.datasets import MapDatasetWrapper\n", + "\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "## 二、数据加载\n", + "\n", + "### 2.1 数据集下载\n", + "\n", + "将使用 [https://www.heywhale.com/](https://cdn.kesci.com/%E6%81%B6%E6%84%8F%E7%BD%91%E9%A1%B5%E5%88%86%E6%9E%90.zip) 提供的恶意网页分析样本作为数据集,来完成本任务。该数据集含有169个恶意网页。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "!wget -c https://cdn.kesci.com/%E6%81%B6%E6%84%8F%E7%BD%91%E9%A1%B5%E5%88%86%E6%9E%90.zip && mv *.zip MaliciousWebpage.zip && unzip MaliciousWebpage.zip " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "### 2.2 读取文件信息\n", + "\n", + "读取文件列表信息。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "columns = ['id', 'flag', 'filename', 'url']\r\n", + "tempdf = pd.read_csv('MaliciousWebpage/file_list.txt', sep=',',skiprows=0, header=None, names=columns, skipfooter=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "### 2.3 使用下采样处理数据集不均衡问题\n", + "\n", + "在数据集中,正常网页样本有9700个,而恶意网页样本近169个,数据集严重不均衡,使用下采样的方法,随机筛选出500个正常网页的样本参与训练。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "n_page = tempdf[tempdf['flag']=='n']\r\n", + "# 对正常页面进行随机采样\r\n", + "n_page = n_page.sample(n=500)\r\n", + "# 提取全部被黑页面样本\r\n", + "d_page = tempdf[tempdf['flag']=='d']\r\n", + "# 合并样本\r\n", + "train_page = pd.concat([n_page,d_page],axis=0)\r\n", + "# 合并样本\r\n", + "train_page = pd.concat([n_page,d_page],axis=0)\r\n", + "# 做一个乱序\r\n", + "train_page = train_page.sample(frac = 1) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "### 2.4 进行字符集编码处理\n", + "\n", + "解析数据集中网页内容时,可能出现因字符集编码不一致导致的读取错误,因此,要先对进行批量字符集编码转换,对数据集进行清洗。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + 
"outputs": [], + "source": [ + "!mkdir TrainWebpage && mkdir TrainWebpage/file1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for filename in tqdm(train_page['filename']):\n", + " # 这里要先做个判断,有file_list里面的文件不存在\n", + " if os.path.exists('MaliciousWebpage/file1/'+filename):\n", + " # 读取文件,获取字符集\n", + " content = codecs.open('MaliciousWebpage/file1/'+filename,'rb').read()\n", + " source_encoding = chardet.detect(content)['encoding']\n", + " # 个别文件的source_encoding是None,这里要先进行筛选\n", + " if source_encoding is None:\n", + " pass\n", + " # 只对字符集是gb2312格式的文件尝试转码\n", + " elif source_encoding == 'gb2312':\n", + " # 转码如果失败,就跳过该文件\n", + " try:\n", + " content = content.decode(source_encoding).encode('utf-8')\n", + " codecs.open('TrainWebpage/file1/'+filename,'wb').write(content)\n", + " except UnicodeDecodeError:\n", + " print(filename + \"读取失败\")\n", + " pass\n", + " # 字符集是utf-8格式的文件直接保存\n", + " elif source_encoding == 'utf-8':\n", + " codecs.open('TrainWebpage/file1/'+filename,'wb').write(content)\n", + " else:\n", + " pass\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "### 2.5 提取网页内容,划分训练集、验证集、测试集\n", + "\n", + "被黑网页的一个典型特征是恶意插入的内容大量集中在HTML页面底部,因此可以提取网页末尾的HTML内容作为输入LSTM的文本信息。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for i, filename in enumerate(train_page['filename']):\r\n", + " # 这里要先做个判断,有file_list里面的文件不存在\r\n", + " if os.path.exists('TrainWebpage/file1/'+filename):\r\n", + " # 读取文件,解析HTML页面\r\n", + " html = BeautifulSoup(open('TrainWebpage/file1/'+filename),'html.parser', from_encoding='utf-8')\r\n", + " text = ''.join(list(html.stripped_strings)[-20:])\r\n", + " # 去掉多余的换行符(部分数据最后解析结果为)\r\n", + " text = text.replace(\"\\n\", \"\")\r\n", + " text = text.replace(\" \", \",\")\r\n", + " # real_label = train_page['flag'][train_page['filename']==filename].values[0]\r\n", + " if i % 5 == 0:\r\n", + " if train_page['flag'][train_page['filename']==filename].values[0] == 'n':\r\n", + " with open(\"webtest.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '0' + '\\n')\r\n", + " elif train_page['flag'][train_page['filename']==filename].values[0] == 'd':\r\n", + " with open(\"webtest.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '1' + '\\n')\r\n", + " elif i % 5 == 1:\r\n", + " if train_page['flag'][train_page['filename']==filename].values[0] == 'n':\r\n", + " with open(\"webdev.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '0' + '\\n')\r\n", + " elif train_page['flag'][train_page['filename']==filename].values[0] == 'd':\r\n", + " with open(\"webdev.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '1' + '\\n')\r\n", + " else:\r\n", + " if train_page['flag'][train_page['filename']==filename].values[0] == 'n':\r\n", + " with open(\"webtrain.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '0' + '\\n')\r\n", + " elif train_page['flag'][train_page['filename']==filename].values[0] == 'd':\r\n", + " with open(\"webtrain.txt\",\"a+\") as f:\r\n", + " f.write(text[-100:] + '\\t' + '1' + '\\n')\r\n", + " else:\r\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "### 2.6 自定义数据集" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "class 
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "### 2.6 Defining a Custom Dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "class SelfDefinedDataset(paddle.io.Dataset):\n",
+ "    def __init__(self, data):\n",
+ "        super(SelfDefinedDataset, self).__init__()\n",
+ "        self.data = data\n",
+ "\n",
+ "    def __getitem__(self, idx):\n",
+ "        return self.data[idx]\n",
+ "\n",
+ "    def __len__(self):\n",
+ "        return len(self.data)\n",
+ "    \n",
+ "    def get_labels(self):\n",
+ "        return [\"0\", \"1\"]\n",
+ "\n",
+ "    @classmethod\n",
+ "    def get_datasets(cls, datasets):\n",
+ "        # Wrap each split with MapDatasetWrapper so .apply() can be used on it later\n",
+ "        # (assumes this PaddleNLP version's MapDatasetWrapper, which forwards other\n",
+ "        # attribute access to the wrapped dataset)\n",
+ "        return [MapDatasetWrapper(cls(d)) for d in datasets]\n",
+ "\n",
+ "def txt_to_list(file_name):\n",
+ "    res_list = []\n",
+ "    for line in open(file_name):\n",
+ "        res_list.append(line.strip().split('\\t'))\n",
+ "    return res_list\n",
+ "\n",
+ "trainlst = txt_to_list('webtrain.txt')\n",
+ "devlst = txt_to_list('webdev.txt')\n",
+ "testlst = txt_to_list('webtest.txt')\n",
+ "\n",
+ "train_ds, dev_ds, test_ds = SelfDefinedDataset.get_datasets([trainlst, devlst, testlst])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['0', '1']\n",
+ "['年以内,2万公里以内SUV1年以内易车二手车体验更好,速度更快立即前往APP看电脑版看微信版提意见购车热线:4000-189-167(,9:00,–,21:00,)易车二手车,m.taoche.com', '0']\n",
+ "['ipaime.com/thread-694853-1-1.htmlcoryphaei.com/forum.php?mod=viewthread&tid=3074054回复返回版块参与回复©,栖霞商业网', '0']\n",
+ "['大直街店集体课表人和国际健身俱乐部首页集体课表联系我们扫描二维码用手机访问本站由业界领先的搜狐快站免费提供技术支持人和国际健身俱乐部人和健身大直街店集体课表15小时前阅读Powered,by,搜狐快站', '0']\n",
+ "['个人帐户工作或学校帐户单位或学校未分配帐户?使用,Microsoft,帐户登录厌烦了这个帐户名称?重命名您的个人,Microsoft,帐户。©,2017,Microsoft使用条款隐私与,Cookie', '0']\n",
+ "['ONGAB4yONGAB4y精绝美女-在线直播在线播放-高清无水印九狮赌城-美女荷官All,rights,reserved.Copyright,©2016,&2017', '0']\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Prepare the labels\r\n",
+ "label_list = train_ds.get_labels()\r\n",
+ "print(label_list)\r\n",
+ "# Inspect a few samples\r\n",
+ "for i in range(5):\r\n",
+ "    print (train_ds[i])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "### 2.7 Building the Vocabulary\n",
+ "\n",
+ "Next, build the Chinese vocabulary; its contents come from segmenting the training-set text. This vocabulary will be used to convert English and Chinese sentences into sequences of token IDs. Two special tokens are also added to the vocabulary:\n",
+ "- `[PAD]`: used to pad shorter sentences.\n",
+ "- `[UNK]`: represents tokens that do not appear in the vocabulary.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Building prefix dict from the default dictionary ...\n",
+ "2021-05-13 17:36:40,318 - DEBUG - Building prefix dict from the default dictionary ...\n",
+ "Loading model from cache /tmp/jieba.cache\n",
+ "2021-05-13 17:36:40,321 - DEBUG - Loading model from cache /tmp/jieba.cache\n",
+ "Loading model cost 0.832 seconds.\n",
+ "2021-05-13 17:36:41,152 - DEBUG - Loading model cost 0.832 seconds.\n",
+ "Prefix dict has been built successfully.\n",
+ "2021-05-13 17:36:41,154 - DEBUG - Prefix dict has been built successfully.\n"
+ ]
+ }
+ ],
+ "source": [
+ "dict_path = 'webdict.txt'\r\n",
+ "\r\n",
+ "# Build the vocabulary file at webdict.txt; empty it first before regenerating it\r\n",
+ "with open(dict_path, 'w') as f:\r\n",
+ "    f.seek(0)\r\n",
+ "    f.truncate() \r\n",
+ "\r\n",
+ "\r\n",
+ "dict_set = set()\r\n",
+ "train_data = open('webtrain.txt')\r\n",
+ "for data in train_data:\r\n",
+ "    # data[:-3] strips the trailing tab, label and newline from each line\r\n",
+ "    seg = jieba.lcut(data[:-3])\r\n",
+ "    for datas in seg:\r\n",
+ "        if datas != \" \":\r\n",
+ "            dict_set.add(datas)\r\n",
+ "\r\n",
+ "dicts = open(dict_path,'w')\r\n",
+ "dicts.write('[PAD]\\n')\r\n",
+ "dicts.write('[UNK]\\n')\r\n",
+ "for data in dict_set:\r\n",
+ "    dicts.write(data + '\\n')\r\n",
+ "dicts.close()"
+ ]
+ },
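+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "A quick check of how large the resulting vocabulary is (a sketch; the exact count depends on the randomly sampled training data):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "with open(dict_path) as f:\r\n",
+ "    print('vocabulary size:', len(f.readlines()))  # includes [PAD] and [UNK]"
+ ]
+ },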
"stream", + "text": [ + "[PAD] 0\n" + ] + } + ], + "source": [ + "# 加载词表\r\n", + "def load_vocab(vocab_file):\r\n", + " \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\r\n", + " vocab = {}\r\n", + " with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\r\n", + " tokens = reader.readlines()\r\n", + " for index, token in enumerate(tokens):\r\n", + " token = token.rstrip(\"\\n\").split(\"\\t\")[0]\r\n", + " vocab[token] = index\r\n", + " return vocab\r\n", + " \r\n", + "vocab = load_vocab('./webdict.txt')\r\n", + "\r\n", + "for k, v in vocab.items():\r\n", + " print(k, v)\r\n", + " break" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "## 三、网络搭建\n", + "\n", + "### 3.1 构造dataloder\n", + "\n", + "下面的`create_data_loader`函数用于创建运行和预测时所需要的`DataLoader`对象。\n", + "\n", + "- `paddle.io.DataLoader`返回一个迭代器,该迭代器根据`batch_sampler`指定的顺序迭代返回dataset数据。异步加载数据。\n", + " \n", + "- `batch_sampler`:DataLoader通过 batch\\_sampler 产生的mini-batch索引列表来 dataset 中索引样本并组成mini-batch\n", + " \n", + "- `collate_fn`:指定如何将样本列表组合为mini-batch数据。传给它参数需要是一个callable对象,需要实现对组建的batch的处理逻辑,并返回每个batch的数据。在这里传入的是`prepare_input`函数,对产生的数据进行pad操作,并返回实际长度等。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def convert_example(example, vocab, unk_token_id=1, is_test=False):\r\n", + " \"\"\"\r\n", + " jieba 分词,转换id\r\n", + " \"\"\"\r\n", + "\r\n", + " input_ids = []\r\n", + " for token in jieba.cut(example[0]):\r\n", + " token_id = vocab.get(token, unk_token_id)\r\n", + " input_ids.append(token_id)\r\n", + " valid_length = np.array(len(input_ids), dtype='int64')\r\n", + "\r\n", + " if not is_test:\r\n", + " label = np.array(example[-1], dtype=\"int64\")\r\n", + " return input_ids, valid_length, label\r\n", + " else:\r\n", + " return input_ids, valid_length\r\n", + "\r\n", + "\r\n", + "def convert_tokens_to_ids(tokens, vocab):\r\n", + " \"\"\" Converts a token id (or a sequence of id) in a token string\r\n", + " (or a sequence of tokens), using the vocabulary.\r\n", + " \"\"\"\r\n", + "\r\n", + " ids = []\r\n", + " unk_id = vocab.get('[UNK]', None)\r\n", + " for token in tokens:\r\n", + " wid = vocab.get(token, unk_id)\r\n", + " if wid:\r\n", + " ids.append(wid)\r\n", + " return ids" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Reads data and generates mini-batches.\n", + "def create_dataloader(dataset,\n", + " trans_function=None,\n", + " mode='train',\n", + " batch_size=1,\n", + " pad_token_id=0,\n", + " batchify_fn=None):\n", + " if trans_function:\n", + " dataset = dataset.apply(trans_function, lazy=True)\n", + "\n", + " # return_list 数据是否以list形式返回\n", + " # collate_fn 指定如何将样本列表组合为mini-batch数据。传给它参数需要是一个callable对象,需要实现对组建的batch的处理逻辑,并返回每个batch的数据。在这里传入的是`prepare_input`函数,对产生的数据进行pad操作,并返回实际长度等。\n", + " dataloader = paddle.io.DataLoader(\n", + " dataset,\n", + " return_list=True,\n", + " batch_size=batch_size,\n", + " collate_fn=batchify_fn)\n", + " \n", + " return dataloader\n", + "\n", + "# python中的偏函数partial,把一个函数的某些参数固定住(也就是设置默认值),返回一个新的函数,调用这个新函数会更简单。\n", + "trans_function = partial(\n", + " convert_example,\n", + " vocab=vocab,\n", + " unk_token_id=vocab.get('[UNK]', 1),\n", + " is_test=False)\n", + "\n", + "# 将读入的数据batch化处理,便于模型batch化运算。\n", + "# batch中的每个句子将会padding到这个batch中的文本最大长度batch_max_seq_len。\n", + "# 
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Reads data and generates mini-batches.\n",
+ "def create_dataloader(dataset,\n",
+ "                      trans_function=None,\n",
+ "                      mode='train',\n",
+ "                      batch_size=1,\n",
+ "                      pad_token_id=0,\n",
+ "                      batchify_fn=None):\n",
+ "    if trans_function:\n",
+ "        dataset = dataset.apply(trans_function, lazy=True)\n",
+ "\n",
+ "    # return_list: whether the data is returned as a list\n",
+ "    # collate_fn specifies how a list of samples is combined into a mini-batch; here batchify_fn pads the samples and returns the actual lengths and labels\n",
+ "    dataloader = paddle.io.DataLoader(\n",
+ "        dataset,\n",
+ "        return_list=True,\n",
+ "        batch_size=batch_size,\n",
+ "        collate_fn=batchify_fn)\n",
+ "    \n",
+ "    return dataloader\n",
+ "\n",
+ "# functools.partial fixes some arguments of a function (i.e. sets defaults) and returns a new function that is simpler to call.\n",
+ "trans_function = partial(\n",
+ "    convert_example,\n",
+ "    vocab=vocab,\n",
+ "    unk_token_id=vocab.get('[UNK]', 1),\n",
+ "    is_test=False)\n",
+ "\n",
+ "# Batch the loaded data so the model can operate on whole batches.\n",
+ "# Each sentence in a batch is padded to the length of the longest text in that batch;\n",
+ "# no truncation is needed here, since the extracted text was already capped at 100 characters in section 2.5.\n",
+ "batchify_fn = lambda samples, fn=Tuple(\n",
+ "    Pad(axis=0, pad_val=vocab['[PAD]']),  # input_ids\n",
+ "    Stack(dtype=\"int64\"),  # seq len\n",
+ "    Stack(dtype=\"int64\")  # label\n",
+ "): [data for data in fn(samples)]\n",
+ "\n",
+ "\n",
+ "train_loader = create_dataloader(\n",
+ "    train_ds,\n",
+ "    trans_function=trans_function,\n",
+ "    batch_size=32,\n",
+ "    mode='train',\n",
+ "    batchify_fn=batchify_fn)\n",
+ "dev_loader = create_dataloader(\n",
+ "    dev_ds,\n",
+ "    trans_function=trans_function,\n",
+ "    batch_size=32,\n",
+ "    mode='validation',\n",
+ "    batchify_fn=batchify_fn)\n",
+ "test_loader = create_dataloader(\n",
+ "    test_ds,\n",
+ "    trans_function=trans_function,\n",
+ "    batch_size=32,\n",
+ "    mode='test',\n",
+ "    batchify_fn=batchify_fn)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "### 3.2 The Encoder\n",
+ "\n",
+ "Use `LSTMEncoder` to build a BiLSTM model for sentence modeling and obtain a vector representation of each sentence.\n",
+ "\n",
+ "A linear layer is then attached to complete the binary classification task.\n",
+ "\n",
+ "- `paddle.nn.Embedding` builds the word-embedding layer\n",
+ "- `ppnlp.seq2vec.LSTMEncoder` builds the sentence-modeling layer\n",
+ "- `paddle.nn.Linear` builds the binary classifier"
+ ]
+ },
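+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "As a quick sketch of where the classifier's input size below comes from: with `direction='bidirectional'`, the encoder concatenates the forward and backward states, so `get_output_dim()` should report twice `lstm_hidden_size` (assuming the `seq2vec` API as used in this notebook):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Throwaway encoder, built only to inspect its output width\n",
+ "demo_encoder = ppnlp.seq2vec.LSTMEncoder(64, 96, num_layers=2, direction='bidirectional')\n",
+ "print(demo_encoder.get_output_dim())  # expected: 192 = 2 * 96"
+ ]
+ },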
+ " padding_idx=vocab['[PAD]'])\n", + "model = paddle.Model(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "## 四、训练模型\n", + "\n", + "接下来开始训练模型。\n", + "\n", + "- 在训练过程中,使用VisualDL记录训练过程并进行可视化" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "optimizer = paddle.optimizer.Adam(\n", + " parameters=model.parameters(), learning_rate=1e-4)\n", + "\n", + "loss = paddle.nn.CrossEntropyLoss()\n", + "metric = paddle.metric.Accuracy()\n", + "\n", + "model.prepare(optimizer, loss, metric)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# 设置visualdl路径\r\n", + "log_dir = './visualdl'\r\n", + "callback = paddle.callbacks.VisualDL(log_dir=log_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model.fit(train_loader, dev_loader, epochs=100, save_dir='./checkpoints', save_freq=5, callbacks=callback)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Eval begin...\n", + "The loss value printed in the log is the current batch, and the metric is the average value of previous step.\n", + "step 4/4 - loss: 0.5094 - acc: 0.8426 - 24ms/step\n", + "Eval samples: 108\n", + "Finally val acc: 0.84259\n" + ] + } + ], + "source": [ + "# 查看模型在验证集上的表现\r\n", + "results = model.evaluate(dev_loader)\r\n", + "print(\"Finally val acc: %.5f\" % results['acc'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "## 五、预测效果\n", + "\n", + "根据你所使用的计算设备的不同,上面的训练过程可能需要不等的时间。\n", + "\n", + "完成上面的模型训练之后,可以得到一个能够通过网页内容识别恶意网页的模型。接下来查看模型在测试集上的泛化能力。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Predict begin...\n", + "step 4/4 [==============================] - ETA: 0s - 43ms/ste - 26ms/step \n", + "Predict samples: 107\n", + "Data: 机超声波保护膜搜狗推广校园推广标签剥离机塑壳断路器厦门婚纱摄影化妆品厂家酿酒设备台湾商务签证行李寄存西安酒店招聘深圳Vi设计不锈钢储罐长沙公司注册湘ICP备15010068号-1, 技术支持:湖南竞网 \t Label: 正常页面\n", + "Data: 如果喜欢这个话题,请点击右上角图标分享正在加载...正在加载... 
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "## 5. Prediction\n",
+ "\n",
+ "Depending on the computing device you use, the training above may take a varying amount of time.\n",
+ "\n",
+ "After the training completes, you have a model that can recognize malicious webpages from their content. Next, check how well the model generalizes on the test set."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Predict begin...\n",
+ "step 4/4 [==============================] - ETA: 0s - 43ms/ste - 26ms/step \n",
+ "Predict samples: 107\n",
+ "Data: 机超声波保护膜搜狗推广校园推广标签剥离机塑壳断路器厦门婚纱摄影化妆品厂家酿酒设备台湾商务签证行李寄存西安酒店招聘深圳Vi设计不锈钢储罐长沙公司注册湘ICP备15010068号-1, 技术支持:湖南竞网 \t Label: 正常页面\n",
+ "Data: 如果喜欢这个话题,请点击右上角图标分享正在加载...正在加载... \t Label: 正常页面\n",
+ "Data: 43,站长邮箱:450376843@qq.com免责声明本站的文章和资源来自互联网或者站长的原创,如果有侵犯版权的资源请尽快联系站长,我们会在24h内删除有争议的资源.鄂ICP备15007646号-2 \t Label: 正常页面\n",
+ "Data: 9%的喜欢综艺娱乐的人都会关注这个公众号长按屏幕——识别图中二维码亲,您需要在App中购买后畅听哦~(iPhone新版即将上线,敬请期待)1.,点击右上角,“···”2.,选择,“在浏览器中打开”登录 \t Label: 正常页面\n",
+ "Data: 网娱乐城客服热线首页官方线上游戏的最佳选择澳门赌场娱乐城澳门官方直营新华娱乐城www.5599076.com钱柜娱乐城线上娱乐平台?皇冠现金网下载地址哪个娱乐网站好送68元,www.933jj.com \t Label: 被黑页面\n"
+ ]
+ }
+ ],
+ "source": [
+ "label_map = {0: '正常页面', 1: '被黑页面'}  # 0: normal page, 1: compromised page\n",
+ "results = model.predict(test_loader, batch_size=128)[0]\n",
+ "predictions = []\n",
+ "\n",
+ "for batch_probs in results:\n",
+ "    # Map the predicted class ids to labels\n",
+ "    idx = np.argmax(batch_probs, axis=-1)\n",
+ "    idx = idx.tolist()\n",
+ "    labels = [label_map[i] for i in idx]\n",
+ "    predictions.extend(labels)\n",
+ "\n",
+ "# Look at the classification results for the first 5 test samples\n",
+ "for idx, data in enumerate(test_ds.data[:5]):\n",
+ "    print('Data: {} \\t Label: {}'.format(data[0], predictions[idx]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "## The End\n",
+ "\n",
+ "You can further improve the detection of compromised webpages in this example by changing the network structure, adjusting the dataset, using pretrained models, or trying different hyperparameters. You can also try applying PaddlePaddle to other similar real-world tasks."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "PaddlePaddle 2.0.0b0 (Python 3.5)",
+ "language": "python",
+ "name": "py35-paddle1.2.0"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
\ No newline at end of file

From 3e21f60fe7549890719b3148ec88a530fbe2ed97 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=B7=B1=E6=B8=8A=E4=B8=8A=E7=9A=84=E5=9D=91?=
Date: Thu, 13 May 2021 23:12:40 +0800
Subject: [PATCH 2/3] make corrections based on comments

---
 .../malicious_webpage_detection.ipynb | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb
index 44e9548c..d2e18b5c 100644
--- a/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb
+++ b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb
@@ -8,7 +8,7 @@
  "source": [
  "# Malicious Webpage Detection with LSTM\n",
  "\n",
- "**Author:** [PaddlePaddle](https://github.com/PaddlePaddle)<br>\n",
\n", + "**作者:** [深渊上的坑](https://github.com/edencfc)
\n", "**日期:** 2021.05
\n", "**摘要:** 本示例教程介绍如何使用飞桨完成一个恶意网页分类任务。通过使用飞桨搭建LSTM网络,组件一个网页内容分类的模型,并在示例的数据集上完成恶意网页的识别。" ] @@ -26,7 +26,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": { "collapsed": false }, @@ -69,7 +69,6 @@ "import paddlenlp\n", "import paddle.nn as nn\n", "import paddle.nn.functional as F\n", - "import paddlenlp as ppnlp\n", "from paddlenlp.data import Pad, Stack, Tuple\n", "from paddlenlp.datasets import MapDatasetWrapper\n", "\n", @@ -442,7 +441,7 @@ "source": [ "## 三、网络搭建\n", "\n", - "### 3.1 构造dataloder\n", + "### 3.1 构造DataLoader\n", "\n", "下面的`create_data_loader`函数用于创建运行和预测时所需要的`DataLoader`对象。\n", "\n", @@ -747,7 +746,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "metadata": { "collapsed": false }, @@ -757,7 +756,7 @@ "output_type": "stream", "text": [ "Predict begin...\n", - "step 4/4 [==============================] - ETA: 0s - 43ms/ste - 26ms/step \n", + "step 4/4 [==============================] - ETA: 0s - 33ms/ste - 21ms/step \n", "Predict samples: 107\n", "Data: 机超声波保护膜搜狗推广校园推广标签剥离机塑壳断路器厦门婚纱摄影化妆品厂家酿酒设备台湾商务签证行李寄存西安酒店招聘深圳Vi设计不锈钢储罐长沙公司注册湘ICP备15010068号-1, 技术支持:湖南竞网 \t Label: 正常页面\n", "Data: 如果喜欢这个话题,请点击右上角图标分享正在加载...正在加载... \t Label: 正常页面\n", @@ -817,4 +816,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} From 09be98f0d7fa0852f3cac3eedec69eff570458b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B7=B1=E6=B8=8A=E4=B8=8A=E7=9A=84=E5=9D=91?= Date: Mon, 17 May 2021 14:25:14 +0800 Subject: [PATCH 3/3] =?UTF-8?q?=E5=88=A0=E9=99=A4lxml=E5=92=8Chtml5lib?= =?UTF-8?q?=E7=9A=84=E5=AE=89=E8=A3=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../malicious_webpage_detection.ipynb | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb index d2e18b5c..b4d6ea50 100644 --- a/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb +++ b/paddle2.0_docs/malicious_webpage_detection/malicious_webpage_detection.ipynb @@ -32,9 +32,7 @@ }, "outputs": [], "source": [ - "!pip install lxml -i https://mirror.baidu.com/pypi/simple/\r\n", - "!pip install bs4 -i https://mirror.baidu.com/pypi/simple/\r\n", - "!pip install html5lib -i https://mirror.baidu.com/pypi/simple/" + "!pip install bs4 -i https://mirror.baidu.com/pypi/simple/" ] }, { @@ -816,4 +814,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} +} \ No newline at end of file