
Commit e426102

Merge pull request #748 from QData/fix-pytest-errors
Fix pytest errors - due to goal_func
qiyanjun authored Sep 11, 2023
2 parents 094025e + 987c926 commit e426102
Showing 20 changed files with 3,885 additions and 3,710 deletions.
2,914 changes: 1,487 additions & 1,427 deletions docs/2notebook/0_End_to_End.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docs/2notebook/1_Introduction_and_Transformations.ipynb
@@ -927,7 +927,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
"version": "3.7.11"
}
},
"nbformat": 4,
17 changes: 4 additions & 13 deletions docs/2notebook/2_Constraints.ipynb
@@ -76,15 +76,6 @@
"Let's import NLTK and download the required modules:"
]
},
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "# cd .."
- ]
- },
{
"cell_type": "code",
"execution_count": 11,
@@ -147,7 +138,7 @@
}
],
"source": [
"!pip3 install .\n",
"! pip3 install textattack[tensorflow]\n",
"\n",
"import nltk\n",
"\n",
@@ -826,7 +817,7 @@
")\n",
"attacker = Attacker(attack, dataset, attack_args)\n",
"\n",
"attacker.attack_dataset()"
"attack_results = attacker.attack_dataset()"
]
},
{
@@ -884,7 +875,7 @@
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -898,7 +889,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
"version": "3.7.11"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
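Unfolded from the notebook JSON, the substantive edits to 2_Constraints.ipynb are small: a stray `# cd ..` cell is removed, the install cell now pulls `textattack[tensorflow]` from PyPI instead of installing the local checkout, and the attack run now binds its return value. A minimal sketch of that updated workflow, assuming a stock recipe and dataset (the notebook builds its own custom attack in cells outside this diff, so the model, recipe, and dataset below are illustrative stand-ins):

```python
# Replaces `!pip3 install .`; run once in a fresh environment:
#   pip3 install textattack[tensorflow]
import transformers

import textattack
from textattack import Attacker, AttackArgs
from textattack.attack_recipes import TextFoolerJin2019
from textattack.datasets import HuggingFaceDataset

# Illustrative victim model and dataset; the notebook assembles its own
# attack with custom constraints rather than a ready-made recipe.
model = transformers.AutoModelForSequenceClassification.from_pretrained(
    "textattack/bert-base-uncased-ag-news"
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "textattack/bert-base-uncased-ag-news"
)
model_wrapper = textattack.models.wrappers.HuggingFaceModelWrapper(model, tokenizer)

attack = TextFoolerJin2019.build(model_wrapper)
dataset = HuggingFaceDataset("ag_news", None, "test")
attack_args = AttackArgs(num_successful_examples=5, log_to_csv="results.csv")
attacker = Attacker(attack, dataset, attack_args)

# The notebook now keeps the returned list of AttackResult objects
# instead of discarding it.
attack_results = attacker.attack_dataset()
```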
164 changes: 82 additions & 82 deletions docs/2notebook/3_Augmentations.ipynb
@@ -1,29 +1,4 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Augmentation with TextAttack.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"cells": [
{
"cell_type": "markdown",
@@ -84,9 +59,11 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5AXyxiLD4X93"
},
"outputs": [],
"source": [
"# import transformations, contraints, and the Augmenter\n",
"from textattack.transformations import WordSwapRandomCharacterDeletion\n",
@@ -97,38 +74,18 @@
"from textattack.constraints.pre_transformation import StopwordModification\n",
"\n",
"from textattack.augmentation import Augmenter"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wFeXF_OL-vyw",
"outputId": "c041e77e-accd-4a58-88be-9b140dd0cd56"
},
"source": [
"# Set up transformation using CompositeTransformation()\n",
"transformation = CompositeTransformation(\n",
" [WordSwapRandomCharacterDeletion(), WordSwapQWERTY()]\n",
")\n",
"# Set up constraints\n",
"constraints = [RepeatModification(), StopwordModification()]\n",
"# Create augmenter with specified parameters\n",
"augmenter = Augmenter(\n",
" transformation=transformation,\n",
" constraints=constraints,\n",
" pct_words_to_swap=0.5,\n",
" transformations_per_example=10,\n",
")\n",
"s = \"What I cannot create, I do not understand.\"\n",
"# Augment!\n",
"augmenter.augment(s)"
],
"execution_count": null,
"outputs": [
{
"data": {
@@ -149,6 +106,24 @@
"metadata": {},
"output_type": "execute_result"
}
+ ],
+ "source": [
+ "# Set up transformation using CompositeTransformation()\n",
+ "transformation = CompositeTransformation(\n",
+ " [WordSwapRandomCharacterDeletion(), WordSwapQWERTY()]\n",
+ ")\n",
+ "# Set up constraints\n",
+ "constraints = [RepeatModification(), StopwordModification()]\n",
+ "# Create augmenter with specified parameters\n",
+ "augmenter = Augmenter(\n",
+ " transformation=transformation,\n",
+ " constraints=constraints,\n",
+ " pct_words_to_swap=0.5,\n",
+ " transformations_per_example=10,\n",
+ ")\n",
+ "s = \"What I cannot create, I do not understand.\"\n",
+ "# Augment!\n",
+ "augmenter.augment(s)"
]
},
{
@@ -173,24 +148,14 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "WkYiVH6lQedu",
"outputId": "cd5ffc65-ca80-45cd-b3bb-d023bcad09a4"
},
"source": [
"# import the CheckListAugmenter\n",
"from textattack.augmentation import CheckListAugmenter\n",
"\n",
"# Alter default values if desired\n",
"augmenter = CheckListAugmenter(pct_words_to_swap=0.2, transformations_per_example=5)\n",
"s = \"I'd love to go to Japan but the tickets are 500 dollars\"\n",
"# Augment\n",
"augmenter.augment(s)"
],
"execution_count": null,
"outputs": [
{
"name": "stdout",
@@ -218,6 +183,16 @@
"metadata": {},
"output_type": "execute_result"
}
+ ],
+ "source": [
+ "# import the CheckListAugmenter\n",
+ "from textattack.augmentation import CheckListAugmenter\n",
+ "\n",
+ "# Alter default values if desired\n",
+ "augmenter = CheckListAugmenter(pct_words_to_swap=0.2, transformations_per_example=5)\n",
+ "s = \"I'd love to go to Japan but the tickets are 500 dollars\"\n",
+ "# Augment\n",
+ "augmenter.augment(s)"
]
},
{
@@ -248,42 +223,25 @@
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "l2b-4scuXvkA",
"outputId": "5a372fd2-226a-4970-a2c9-c09bf2af56c2"
},
"source": [
"from textattack.augmentation import WordNetAugmenter\n",
"\n",
"augmenter = WordNetAugmenter(\n",
" pct_words_to_swap=0.4,\n",
" transformations_per_example=5,\n",
" high_yield=True,\n",
" enable_advanced_metrics=True,\n",
")\n",
"s = \"I'd love to go to Japan but the tickets are 500 dollars\"\n",
"results = augmenter.augment(s)\n",
"print(f\"Average Original Perplexity Score: {results[1]['avg_original_perplexity']}\\n\")\n",
"print(f\"Average Augment Perplexity Score: {results[1]['avg_attack_perplexity']}\\n\")\n",
"print(f\"Average Augment USE Score: {results[2]['avg_attack_use_score']}\\n\")\n",
"print(f\"Augmentations:\")\n",
"results[0]"
],
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"output_type": "stream",
"text": [
"Token indices sequence length is longer than the specified maximum sequence length for this model (1091 > 1024). Running this sequence through the model will result in indexing errors\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"output_type": "stream",
"text": [
"Average Original Perplexity Score: 1.09\n",
"\n",
@@ -295,7 +253,6 @@
]
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"[\"I'd bang to operate to Japan but the ticket are 500 buck\",\n",
@@ -373,9 +330,27 @@
" \"I'd screw to plump to Nihon but the tickets are 500 clam\"]"
]
},
"execution_count": 9,
"metadata": {},
"execution_count": 9
"output_type": "execute_result"
}
+ ],
+ "source": [
+ "from textattack.augmentation import WordNetAugmenter\n",
+ "\n",
+ "augmenter = WordNetAugmenter(\n",
+ " pct_words_to_swap=0.4,\n",
+ " transformations_per_example=5,\n",
+ " high_yield=True,\n",
+ " enable_advanced_metrics=True,\n",
+ ")\n",
+ "s = \"I'd love to go to Japan but the tickets are 500 dollars\"\n",
+ "results = augmenter.augment(s)\n",
+ "print(f\"Average Original Perplexity Score: {results[1]['avg_original_perplexity']}\\n\")\n",
+ "print(f\"Average Augment Perplexity Score: {results[1]['avg_attack_perplexity']}\\n\")\n",
+ "print(f\"Average Augment USE Score: {results[2]['avg_attack_use_score']}\\n\")\n",
+ "print(f\"Augmentations:\")\n",
+ "results[0]"
]
},
{
@@ -388,5 +363,30 @@
"We have now went through the basics in running `Augmenter` by either creating a new augmenter from scratch or using a pre-built augmenter. This could be done in as few as 4 lines of code so please give it a try if you haven't already! 🐙"
]
}
- ]
- }
+ ],
+ "metadata": {
+ "colab": {
+ "name": "Augmentation with TextAttack.ipynb",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+ }
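Nearly all of the churn in 3_Augmentations.ipynb is mechanical re-serialization: inside each code cell, `execution_count` and `outputs` now precede `source`; the notebook-level `metadata` block moves from the head of the file to its tail; and the recorded Python version changes from 3.8.8 to 3.7.11. The augmentation recipe itself is unchanged. Reassembled from the cell sources above into plain Python, it reads:

```python
# Custom augmenter built from two character-level transformations,
# reassembled from the notebook cell sources shown in the diff above.
from textattack.augmentation import Augmenter
from textattack.constraints.pre_transformation import (
    RepeatModification,
    StopwordModification,
)
from textattack.transformations import (
    CompositeTransformation,
    WordSwapQWERTY,
    WordSwapRandomCharacterDeletion,
)

# Apply either a random character deletion or a QWERTY-adjacent swap.
transformation = CompositeTransformation(
    [WordSwapRandomCharacterDeletion(), WordSwapQWERTY()]
)
# Never modify the same word twice, and leave stopwords alone.
constraints = [RepeatModification(), StopwordModification()]

augmenter = Augmenter(
    transformation=transformation,
    constraints=constraints,
    pct_words_to_swap=0.5,
    transformations_per_example=10,
)
augmenter.augment("What I cannot create, I do not understand.")  # 10 variants
```

The pre-built `CheckListAugmenter` and `WordNetAugmenter` cells later in the file use the same `augment()` interface, differing only in their transformations and default parameters.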
25 changes: 23 additions & 2 deletions docs/2notebook/4_Custom_Datasets_Word_Embedding.ipynb
@@ -537,10 +537,31 @@
"\n",
"attack = Attack(goal_function, constraints, transformation, search_method)\n",
"\n",
"# here is a legacy code piece showing how the attack runs in details \n",
"for example, label in custom_dataset:\n",
" result = attack.attack(example, label)\n",
" print(result.__str__(color_method=\"ansi\"))"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# here is the currently recommended API-centric way to use a customized attack\n",
+ "\n",
+ "from textattack.loggers import CSVLogger # tracks a dataframe for us.\n",
+ "from textattack.attack_results import SuccessfulAttackResult\n",
+ "from textattack import Attacker, AttackArgs\n",
+ "\n",
+ "attack_args = AttackArgs(\n",
+ " num_successful_examples=5, log_to_csv=\"results.csv\", csv_coloring_style=\"html\"\n",
+ ")\n",
+ "attacker = Attacker(attack, dataset, attack_args)\n",
+ "\n",
+ "attack_results = attacker.attack_dataset()"
+ ]
}
],
"metadata": {
@@ -549,7 +570,7 @@
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -563,7 +584,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
"version": "3.7.11"
}
},
"nbformat": 4,
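The one substantive change to 4_Custom_Datasets_Word_Embedding.ipynb is the new cell pairing the legacy per-example loop with the now-recommended `Attacker` API (plus the same kernelspec and Python-version metadata touch-ups as the other notebooks). As plain Python, the added cell amounts to the following fragment; `attack` and `dataset` are assumed to be the custom objects assembled in the notebook's earlier cells:

```python
# Recommended API-centric way to run a customized attack. Assumes `attack`
# (built as Attack(goal_function, constraints, transformation, search_method))
# and `dataset` already exist, as in the notebook's earlier cells.
from textattack import Attacker, AttackArgs
from textattack.attack_results import SuccessfulAttackResult
from textattack.loggers import CSVLogger  # tracks a dataframe of results

attack_args = AttackArgs(
    num_successful_examples=5,
    log_to_csv="results.csv",
    csv_coloring_style="html",
)
attacker = Attacker(attack, dataset, attack_args)

# Runs until 5 successful adversarial examples are found, logging each one
# to results.csv, and returns the results as a list instead of only printing.
attack_results = attacker.attack_dataset()
```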