Merge pull request #749 from QData/format-updates
format update
qiyanjun authored Sep 11, 2023
2 parents e426102 + 9cdaa48 commit bde7a36
Showing 17 changed files with 29 additions and 37 deletions.
10 changes: 5 additions & 5 deletions docs/2notebook/0_End_to_End.ipynb
@@ -1430,7 +1430,7 @@
"metadata": {},
"outputs": [],
"source": [
"!textattack attack --model cnn-yelp --num-examples 3 --search-method greedy-word-wir --transformation word-swap-wordnet --constraints cola^max_diff=0.1 bert-score^min_bert_score=0.7 --enable-advance-metrics \n"
"!textattack attack --model cnn-yelp --num-examples 3 --search-method greedy-word-wir --transformation word-swap-wordnet --constraints cola^max_diff=0.1 bert-score^min_bert_score=0.7 --enable-advance-metrics"
]
},
{
@@ -1439,7 +1439,7 @@
"metadata": {},
"outputs": [],
"source": [
"!textattack attack --model lstm-mr --recipe deepwordbug --num-examples 2 --attack-n --enable-advance-metrics \n"
"!textattack attack --model lstm-mr --recipe deepwordbug --num-examples 2 --attack-n --enable-advance-metrics"
]
},
{
@@ -1448,7 +1448,7 @@
"metadata": {},
"outputs": [],
"source": [
"!textattack attack --model lstm-mr --recipe hotflip --num-examples 4 --num-examples-offset 3 --enable-advance-metrics "
"!textattack attack --model lstm-mr --recipe hotflip --num-examples 4 --num-examples-offset 3 --enable-advance-metrics"
]
},
{
@@ -1457,7 +1457,7 @@
"metadata": {},
"outputs": [],
"source": [
"!textattack attack --model-from-huggingface distilbert-base-uncased-finetuned-sst-2-english --dataset-from-huggingface glue^sst2^train --recipe deepwordbug --num-examples 3 --enable-advance-metrics\n"
"!textattack attack --model-from-huggingface distilbert-base-uncased-finetuned-sst-2-english --dataset-from-huggingface glue^sst2^train --recipe deepwordbug --num-examples 3 --enable-advance-metrics"
]
},
{
@@ -1466,7 +1466,7 @@
"metadata": {},
"outputs": [],
"source": [
"! textattack attack --model cnn-imdb --attack-from-file tests/sample_inputs/attack_from_file.py^Attack --num-examples 2 --num-examples-offset 18 --attack-n "
"! textattack attack --model cnn-imdb --attack-from-file tests/sample_inputs/attack_from_file.py^Attack --num-examples 2 --num-examples-offset 18 --attack-n"
]
}
],
2 changes: 1 addition & 1 deletion docs/2notebook/4_Custom_Datasets_Word_Embedding.ipynb
@@ -537,7 +537,7 @@
"\n",
"attack = Attack(goal_function, constraints, transformation, search_method)\n",
"\n",
"# here is a legacy code piece showing how the attack runs in details \n",
"# here is a legacy code piece showing how the attack runs in details\n",
"for example, label in custom_dataset:\n",
" result = attack.attack(example, label)\n",
" print(result.__str__(color_method=\"ansi\"))"
1 change: 0 additions & 1 deletion docs/2notebook/Example_3_Keras.ipynb
@@ -259,7 +259,6 @@
" self.model = model\n",
"\n",
" def __call__(self, text_input_list):\n",
"\n",
" x_transform = []\n",
" for i, review in enumerate(text_input_list):\n",
" tokens = [x.strip(\",\") for x in review.split()]\n",
16 changes: 3 additions & 13 deletions docs/2notebook/Example_5_Explain_BERT.ipynb
@@ -273,19 +273,9 @@
" list_of_text = []\n",
" number = input_ids.size()[0]\n",
" for i in range(number):\n",
" ii = (\n",
" input_ids[\n",
" i,\n",
" ]\n",
" .cpu()\n",
" .numpy()\n",
" )\n",
" tt = token_type_ids[\n",
" i,\n",
" ]\n",
" am = attention_mask[\n",
" i,\n",
" ]\n",
" ii = input_ids[i,].cpu().numpy()\n",
" tt = token_type_ids[i,]\n",
" am = attention_mask[i,]\n",
" txt = tokenizer.decode(ii, skip_special_tokens=True)\n",
" list_of_text.append(txt)\n",
" return list_of_text\n",
2 changes: 1 addition & 1 deletion tests/test_attacked_text.py
@@ -70,7 +70,7 @@ def test_window_around_index(self, attacked_text):

def test_big_window_around_index(self, attacked_text):
assert (
-attacked_text.text_window_around_index(0, 10 ** 5) + "."
+attacked_text.text_window_around_index(0, 10**5) + "."
) == attacked_text.text

def test_window_around_index_start(self, attacked_text):
4 changes: 2 additions & 2 deletions tests/test_word_embedding.py
@@ -10,7 +10,7 @@ def test_embedding_paragramcf():
word_embedding = WordEmbedding.counterfitted_GLOVE_embedding()
assert pytest.approx(word_embedding[0][0]) == -0.022007
assert pytest.approx(word_embedding["fawn"][0]) == -0.022007
-assert word_embedding[10 ** 9] is None
+assert word_embedding[10**9] is None


def test_embedding_gensim():
@@ -37,7 +37,7 @@ def test_embedding_gensim():
word_embedding = GensimWordEmbedding(keyed_vectors)
assert pytest.approx(word_embedding[0][0]) == 1
assert pytest.approx(word_embedding["bye-bye"][0]) == -1 / np.sqrt(2)
-assert word_embedding[10 ** 9] is None
+assert word_embedding[10**9] is None

# test query functionality
assert pytest.approx(word_embedding.get_cos_sim(1, 3)) == 0
4 changes: 2 additions & 2 deletions textattack/attack.py
@@ -83,8 +83,8 @@ def __init__(
constraints: List[Union[Constraint, PreTransformationConstraint]],
transformation: Transformation,
search_method: SearchMethod,
-transformation_cache_size=2 ** 15,
-constraint_cache_size=2 ** 15,
+transformation_cache_size=2**15,
+constraint_cache_size=2**15,
):
"""Initialize an attack object.
4 changes: 2 additions & 2 deletions textattack/attack_args.py
@@ -504,8 +504,8 @@ class _CommandLineAttackArgs:
interactive: bool = False
parallel: bool = False
model_batch_size: int = 32
-model_cache_size: int = 2 ** 18
-constraint_cache_size: int = 2 ** 18
+model_cache_size: int = 2**18
+constraint_cache_size: int = 2**18

@classmethod
def _add_parser_args(cls, parser):
2 changes: 1 addition & 1 deletion textattack/constraints/grammaticality/cola.py
@@ -43,7 +43,7 @@ def __init__(

self.max_diff = max_diff
self.model_name = model_name
-self._reference_score_cache = lru.LRU(2 ** 10)
+self._reference_score_cache = lru.LRU(2**10)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = HuggingFaceModelWrapper(model, tokenizer)
@@ -49,7 +49,7 @@ def __init__(self):
self.sess, self.graph, self.PBTXT_PATH, self.CKPT_PATH
)

-self.lm_cache = lru.LRU(2 ** 18)
+self.lm_cache = lru.LRU(2**18)

def clear_cache(self):
self.lm_cache.clear()
2 changes: 1 addition & 1 deletion textattack/constraints/grammaticality/part_of_speech.py
@@ -56,7 +56,7 @@ def __init__(
self.language_nltk = language_nltk
self.language_stanza = language_stanza

-self._pos_tag_cache = lru.LRU(2 ** 14)
+self._pos_tag_cache = lru.LRU(2**14)
if tagger_type == "flair":
if tagset == "universal":
self._flair_pos_tagger = SequenceTagger.load("upos-fast")
@@ -32,7 +32,7 @@ def __init__(self, embedding=None, **kwargs):
def clear_cache(self):
self._get_thought_vector.cache_clear()

-@functools.lru_cache(maxsize=2 ** 10)
+@functools.lru_cache(maxsize=2**10)
def _get_thought_vector(self, text):
"""Sums the embeddings of all the words in ``text`` into a "thought
vector"."""
2 changes: 1 addition & 1 deletion textattack/goal_functions/goal_function.py
@@ -40,7 +40,7 @@ def __init__(
use_cache=True,
query_budget=float("inf"),
model_batch_size=32,
-model_cache_size=2 ** 20,
+model_cache_size=2**20,
):
validators.validate_model_goal_function_compatibility(
self.__class__, model_wrapper.model.__class__
2 changes: 1 addition & 1 deletion textattack/goal_functions/text/minimize_bleu.py
@@ -59,7 +59,7 @@ def extra_repr_keys(self):
return ["maximizable", "target_bleu"]


-@functools.lru_cache(maxsize=2 ** 12)
+@functools.lru_cache(maxsize=2**12)
def get_bleu(a, b):
ref = a.words
hyp = b.words
4 changes: 2 additions & 2 deletions textattack/goal_functions/text/non_overlapping_output.py
@@ -38,12 +38,12 @@ def _get_score(self, model_output, _):
return num_words_diff / len(get_words_cached(self.ground_truth_output))


-@functools.lru_cache(maxsize=2 ** 12)
+@functools.lru_cache(maxsize=2**12)
def get_words_cached(s):
return np.array(words_from_text(s))


-@functools.lru_cache(maxsize=2 ** 12)
+@functools.lru_cache(maxsize=2**12)
def word_difference_score(s1, s2):
"""Returns the number of words that are non-overlapping between s1 and
s2."""
2 changes: 1 addition & 1 deletion textattack/metrics/attack_metrics/words_perturbed.py
@@ -31,7 +31,7 @@ def calculate(self, results):
self.total_attacks = len(self.results)
self.all_num_words = np.zeros(len(self.results))
self.perturbed_word_percentages = np.zeros(len(self.results))
-self.num_words_changed_until_success = np.zeros(2 ** 16)
+self.num_words_changed_until_success = np.zeros(2**16)
self.max_words_changed = 0

for i, result in enumerate(self.results):
5 changes: 4 additions & 1 deletion textattack/shared/validators.py
@@ -25,7 +25,10 @@
r"^textattack.models.helpers.word_cnn_for_classification.*",
r"^transformers.modeling_\w*\.\w*ForSequenceClassification$",
],
-(NonOverlappingOutput, MinimizeBleu,): [
+(
+NonOverlappingOutput,
+MinimizeBleu,
+): [
r"^textattack.models.helpers.t5_for_text_to_text.*",
],
}
