Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions bugbug/code_search/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,9 @@ def search(repo_dir, commit_hash, symbol_name):
return []

logger.error(
f"Error running 'hg grep' command.\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}"
"Error running 'hg grep' command.\nstdout:\n%s\n\nstderr:\n%s",
e.stdout.decode(),
e.stderr.decode(),
)
raise

Expand All @@ -60,7 +62,9 @@ def search(repo_dir, commit_hash, symbol_name):
)
except subprocess.CalledProcessError as e:
logger.error(
f"Error running 'hg cat' command.\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}"
"Error running 'hg cat' command.\nstdout:\n%s\n\nstderr:\n%s",
e.stdout.decode(),
e.stderr.decode(),
)
raise

Expand Down
21 changes: 13 additions & 8 deletions bugbug/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ def print_feature_importances(self, important_features, class_probabilities=None

# allow maximum of 3 columns in a row to fit the page better
COLUMNS = 3
logger.info("Top {} features:".format(len(top_feature_names)))
logger.info("Top %s features:", len(top_feature_names))
for i in range(0, len(top_feature_names), COLUMNS):
table = []
for item in shap_val:
Expand Down Expand Up @@ -380,7 +380,7 @@ def train(self, importance_cutoff=0.15, limit=None):
X = X[:limit]
y = y[:limit]

logger.info(f"X: {X.shape}, y: {y.shape}")
logger.info("X: %s, y: %s", X.shape, y.shape)

is_multilabel = isinstance(y[0], np.ndarray)
is_binary = len(self.class_names) == 2
Expand Down Expand Up @@ -408,11 +408,14 @@ def train(self, importance_cutoff=0.15, limit=None):
"std": score.std() * 2,
}
logger.info(
f"{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})"
"%s: %.4f (+/- %.4f)",
scoring.capitalize(),
score.mean(),
(score.std() * 2),
)

logger.info(f"X_train: {X_train.shape}, y_train: {y_train.shape}")
logger.info(f"X_test: {X_test.shape}, y_test: {y_test.shape}")
logger.info("X_train: %s, y_train: %s", X_train.shape, y_train.shape)
logger.info("X_test: %s, y_test: %s", X_test.shape, y_test.shape)

self.clf.fit(X_train, self.le.transform(y_train))
logger.info("Number of features: %d", self.clf.steps[-1][1].n_features_in_)
Expand Down Expand Up @@ -480,7 +483,7 @@ def train(self, importance_cutoff=0.15, limit=None):
"The predictions should be multilabel"
)

logger.info(f"No confidence threshold - {len(y_test)} classified")
logger.info("No confidence threshold - %d classified", len(y_test))
if is_multilabel:
confusion_matrix = metrics.multilabel_confusion_matrix(y_test, y_pred)
else:
Expand Down Expand Up @@ -548,7 +551,9 @@ def train(self, importance_cutoff=0.15, limit=None):
)

logger.info(
f"\nConfidence threshold > {confidence_threshold} - {classified_num} classified"
"\nConfidence threshold > %s - %d classified",
confidence_threshold,
classified_num,
)
if is_multilabel:
confusion_matrix = metrics.multilabel_confusion_matrix(
Expand Down Expand Up @@ -579,7 +584,7 @@ def train(self, importance_cutoff=0.15, limit=None):
X_train = X
y_train = y

logger.info(f"X_train: {X_train.shape}, y_train: {y_train.shape}")
logger.info("X_train: %s, y_train: %s", X_train.shape, y_train.shape)

self.clf.fit(X_train, self.le.transform(y_train))

Expand Down
2 changes: 1 addition & 1 deletion bugbug/models/rcatype.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def get_rca_from_whiteboard(self, whiteboard_data):
)

if rca_whiteboard_split[1] not in self.RCA_LIST:
logger.warning(rca_whiteboard_split[1] + " not in RCA_LIST")
logger.warning("%s not in RCA_LIST", rca_whiteboard_split[1])
else:
rca.append(rca_whiteboard_split[1])
return rca
Expand Down
4 changes: 2 additions & 2 deletions bugbug/models/testselect.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ def _get_cost(config: str) -> int:
if all(s in config for s in substrings):
return cost

logger.warning(f"Couldn't find cost for {config}")
logger.warning("Couldn't find cost for %s", config)
return max(cost for _, cost in costs)


Expand Down Expand Up @@ -309,7 +309,7 @@ def select_configs(

for group in groups:
if group not in equivalence_sets:
logger.warning(f"No equivalence sets for group {group}")
logger.warning("No equivalence sets for group %s", group)
continue
# Create constraints to ensure at least one task from each set of equivalent
# groups is selected.
Expand Down
18 changes: 8 additions & 10 deletions bugbug/phabricator.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ def get_testing_project(rev: RevisionDict) -> str | None:
]

if len(testing_projects) > 1:
logger.warning("Revision D{} has more than one testing tag.".format(rev["id"]))
logger.warning("Revision D%s has more than one testing tag.", rev["id"])

if len(testing_projects) == 0:
return None
Expand Down Expand Up @@ -237,7 +237,7 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None:
) = get_review_dates(rev)

if creation_date is None:
logger.warning("Revision D{} has no creation date.".format(rev["id"]))
logger.warning("Revision D%s has no creation date.", rev["id"])
return None

if len(review_dates) == 0:
Expand All @@ -253,7 +253,7 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None:
and first_exclusion_end_date is not None
and first_exclusion_start_date > first_exclusion_end_date
):
logger.warning("Revision D{} was in an inconsistent state.".format(rev["id"]))
logger.warning("Revision D%s was in an inconsistent state.", rev["id"])

if (
first_exclusion_start_date is None
Expand All @@ -264,9 +264,8 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None:
first_exclusion_end_date is None or first_exclusion_end_date > first_review_date
):
logger.warning(
"Revision D{} was accepted while in 'planned changes' or 'closed' state.".format(
rev["id"]
)
"Revision D%s was accepted while in 'planned changes' or 'closed' state.",
rev["id"],
)
return first_review_date - creation_date
else:
Expand All @@ -284,7 +283,7 @@ def get_pending_review_time(rev: RevisionDict) -> timedelta | None:
creation_date, _, exclusion_start_dates, exclusion_end_dates = get_review_dates(rev)

if creation_date is None:
logger.warning("Revision D{} has no creation date.".format(rev["id"]))
logger.warning("Revision D%s has no creation date.", rev["id"])
return None

last_exclusion_start_date = max(exclusion_start_dates, default=None)
Expand All @@ -295,9 +294,8 @@ def get_pending_review_time(rev: RevisionDict) -> timedelta | None:
or last_exclusion_start_date > last_exclusion_end_date
):
logger.warning(
"Revision D{} was in an inconsistent state (needs review, but is in an exception timespan).".format(
rev["id"]
)
"Revision D%s was in an inconsistent state (needs review, but is in an exception timespan).",
rev["id"],
)

if last_exclusion_end_date is not None:
Expand Down
20 changes: 15 additions & 5 deletions bugbug/repository.py
Original file line number Diff line number Diff line change
Expand Up @@ -670,7 +670,9 @@ def set_commit_metrics(
try:
get_space_metrics(commit.metrics, after_metrics["spaces"])
except AnalysisException:
logger.debug(f"rust-code-analysis error on commit {commit.node}, path {path}")
logger.debug(
"rust-code-analysis error on commit %s, path %s", commit.node, path
)

before_metrics_dict = get_total_metrics_dict()
try:
Expand All @@ -679,7 +681,9 @@ def set_commit_metrics(
before_metrics_dict, before_metrics["spaces"], calc_summaries=False
)
except AnalysisException:
logger.debug(f"rust-code-analysis error on commit {commit.node}, path {path}")
logger.debug(
"rust-code-analysis error on commit %s, path %s", commit.node, path
)

commit.metrics_diff = {
f"{metric}_total": commit.metrics[f"{metric}_total"]
Expand All @@ -704,7 +708,10 @@ def set_commit_metrics(
get_space_metrics(metrics_dict, func, calc_summaries=False)
except AnalysisException:
logger.debug(
f"rust-code-analysis error on commit {commit.node}, path {path}, function {func['name']}"
"rust-code-analysis error on commit %s, path %s, function %s",
commit.node,
path,
func["name"],
)

commit.functions[path].append(
Expand Down Expand Up @@ -734,7 +741,7 @@ def transform(hg: hglib.client, repo_dir: str, commit: Commit) -> Commit:
try:
patch_data = rs_parsepatch.get_lines(patch)
except Exception:
logger.error(f"Exception while analyzing {commit.node}")
logger.error("Exception while analyzing %s", commit.node)
raise

for stats in patch_data:
Expand Down Expand Up @@ -1185,7 +1192,10 @@ def update_complex_experiences(
)
else:
logger.warning(
f"Experience missing for file {orig}, type '{commit_type}', on commit {commit.node}"
"Experience missing for file %s, type '%s', on commit %s",
orig,
commit_type,
commit.node,
)

if (
Expand Down
Loading