Commit: reformat
yangheng95 committed Aug 30, 2023
1 parent 13a317b commit 6176650
Showing 19 changed files with 11 additions and 20 deletions.
examples/attack/attack_keras_parallel.py (1 change: 0 additions & 1 deletion)
@@ -70,7 +70,6 @@ def __init__(self, model):
        self.model = model

    def __call__(self, text_input_list):
-
        x_transform = []
        for i, review in enumerate(text_input_list):
            tokens = [x.strip(",") for x in review.split()]
textattack/attack_recipes/morpheus_tan_2020.py (1 change: 0 additions & 1 deletion)
@@ -27,7 +27,6 @@ class MorpheusTan2020(AttackRecipe):

    @staticmethod
    def build(model_wrapper):
-
        #
        # Goal is to minimize BLEU score between the model output given for the
        # perturbed input sequence and the reference translation
textattack/attack_recipes/seq2sick_cheng_2018_blackbox.py (1 change: 0 additions & 1 deletion)
@@ -31,7 +31,6 @@ class Seq2SickCheng2018BlackBox(AttackRecipe):

    @staticmethod
    def build(model_wrapper, goal_function="non_overlapping"):
-
        #
        # Goal is non-overlapping output.
        #
textattack/commands/augment_command.py (1 change: 0 additions & 1 deletion)
@@ -32,7 +32,6 @@ def run(self, args):

        args = textattack.AugmenterArgs(**vars(args))
        if args.interactive:
-
            print("\nRunning in interactive mode...\n")
            augmenter = eval(AUGMENTATION_RECIPE_NAMES[args.recipe])(
                pct_words_to_swap=args.pct_words_to_swap,
textattack/commands/eval_model_command.py (2 changes: 1 addition & 1 deletion)
@@ -56,7 +56,7 @@ def test_model_on_dataset(self, args):
        while i < min(args.num_examples, len(dataset)):
            dataset_batch = dataset[i : min(args.num_examples, i + args.batch_size)]
            batch_inputs = []
-            for (text_input, ground_truth_output) in dataset_batch:
+            for text_input, ground_truth_output in dataset_batch:
                attacked_text = textattack.shared.AttackedText(text_input)
                batch_inputs.append(attacked_text.tokenizer_input)
                ground_truth_outputs.append(ground_truth_output)
textattack/constraints/overlap/max_words_perturbed.py (1 change: 0 additions & 1 deletion)
@@ -38,7 +38,6 @@ def __init__(
        self.max_percent = max_percent

    def _check_constraint(self, transformed_text, reference_text):
-
        num_words_diff = len(transformed_text.all_words_diff(reference_text))
        if self.max_percent:
            min_num_words = min(len(transformed_text.words), len(reference_text.words))
(changed file, path not shown)
@@ -31,7 +31,7 @@ def __init__(self, threshold=0.8, large=False, metric="angular", **kwargs):
        try:
            self.model = hub.load(self._tfhub_url)
        except Exception as e:
-            print('Error loading model from tfhub, trying mirror url')
+            print("Error loading model from tfhub, trying mirror url")
            self.model = hub.load(self.mirror_tfhub_url)

    def encode(self, sentences):
@@ -47,5 +47,5 @@ def __setstate__(self, state):
        try:
            self.model = hub.load(self._tfhub_url)
        except Exception as e:
-            print('Error loading model from tfhub, trying mirror url')
+            print("Error loading model from tfhub, trying mirror url")
            self.model = hub.load(self.mirror_tfhub_url)
(changed file, path not shown)
@@ -35,7 +35,7 @@ def encode(self, sentences):
        try:
            self.model = hub.load(self._tfhub_url)
        except Exception as e:
-            print('Error loading model from tfhub, trying mirror url')
+            print("Error loading model from tfhub, trying mirror url")
            self.model = hub.load(self.mirror_tfhub_url)
        return self.model(sentences).numpy()

@@ -49,5 +49,5 @@ def __setstate__(self, state):
        try:
            self.model = hub.load(self._tfhub_url)
        except Exception as e:
-            print('Error loading model from tfhub, trying mirror url')
+            print("Error loading model from tfhub, trying mirror url")
            self.model = hub.load(self.mirror_tfhub_url)
(changed file, path not shown)
@@ -26,7 +26,6 @@ def __init__(
        num_queries,
        ground_truth_output,
    ):
-
        super().__init__(
            attacked_text,
            raw_output,
(changed file, path not shown)
@@ -23,7 +23,6 @@ def __init__(
        num_queries,
        ground_truth_output,
    ):
-
        super().__init__(
            attacked_text,
            raw_output,
textattack/loggers/weights_and_biases_logger.py (1 change: 0 additions & 1 deletion)
@@ -13,7 +13,6 @@ class WeightsAndBiasesLogger(Logger):
    """Logs attack results to Weights & Biases."""

    def __init__(self, **kwargs):
-
        global wandb
        wandb = LazyLoader("wandb", globals(), "wandb")
textattack/metrics/quality_metrics/perplexity.py (1 change: 0 additions & 1 deletion)
@@ -94,7 +94,6 @@ def calculate(self, results):
        return self.all_metrics

    def calc_ppl(self, texts):
-
        with torch.no_grad():
            text = " ".join(texts)
            eval_loss = []
textattack/search_methods/greedy_word_swap_wir.py (1 change: 0 additions & 1 deletion)
@@ -65,7 +65,6 @@ def _get_index_order(self, initial_text):
            # compute the largest change in score we can find by swapping each word
            delta_ps = []
            for idx in indices_to_order:
-
                # Exit Loop when search_over is True - but we need to make sure delta_ps
                # is the same size as softmax_saliency_scores
                if search_over:
textattack/shared/attacked_text.py (1 change: 1 addition & 0 deletions)
@@ -259,6 +259,7 @@ def ith_word_diff(self, other_attacked_text: AttackedText, i: int) -> bool:

    def words_diff_num(self, other_attacked_text: AttackedText) -> int:
        """The number of words different between two AttackedText objects."""
+
        # using edit distance to calculate words diff num
        def generate_tokens(words):
            result = {}
textattack/shared/validators.py (5 changes: 4 additions & 1 deletion)
@@ -24,7 +24,10 @@
        r"^textattack.models.helpers.word_cnn_for_classification.*",
        r"^transformers.modeling_\w*\.\w*ForSequenceClassification$",
    ],
-    (NonOverlappingOutput, MinimizeBleu,): [
+    (
+        NonOverlappingOutput,
+        MinimizeBleu,
+    ): [
        r"^textattack.models.helpers.t5_for_text_to_text.*",
    ],
}
textattack/trainer.py (1 change: 0 additions & 1 deletion)
@@ -407,7 +407,6 @@ def collate_fn(data):
            is_adv_sample = []
            for item in data:
                if "_example_type" in item[0].keys():
-
                    # Get example type value from OrderedDict and remove it

                    adv = item[0].pop("_example_type")
textattack/training_args.py (1 change: 0 additions & 1 deletion)
@@ -547,7 +547,6 @@ def _create_dataset_from_args(cls, args):
            train_dataset.output_column == "label"
            and eval_dataset.output_column == "label"
        ):
-
            train_dataset_labels = train_dataset._dataset["label"]

            eval_dataset_labels = eval_dataset._dataset["label"]
(changed file, path not shown)
@@ -64,7 +64,6 @@ def _get_transformations(self, current_text, indices_to_modify):
        return transformed_texts

    def _get_replacement_words(self, word, word_part_of_speech):
-
        replacement_words = []
        tag = word_part_of_speech
        if (
(changed file, path not shown)
@@ -70,7 +70,7 @@ def _get_transformations(self, current_text, indices_to_modify):

        # replace original numbers with new numbers
        transformed_texts = []
-        for (idx, word) in num_words:
+        for idx, word in num_words:
            replacement_words = self._get_new_number(word)
            for r in replacement_words:
                if r == word:

0 comments on commit 6176650
