ci: Make token parsing more robust

Start by finding our known prefix, stripping only that off, then
validating that the rest of the data makes sense. This fixes certain
tokens which were erroneously getting rejected.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31270>
This commit is contained in:
Daniel Stone
2024-09-20 01:29:55 +02:00
committed by Marge Bot
parent 1f9f7ae112
commit 9f21e90a0c

View File

@@ -99,19 +99,23 @@ def get_token_from_default_dir() -> str:
def validate_gitlab_token(token: str) -> bool:
    """Best-effort sanity check that *token* looks like a GitLab token.

    The token must start with one of the recognised prefixes in
    ``TOKEN_PREFIXES``; only that prefix is stripped, and the remainder
    is then validated against GitLab's published token shape.

    :param token: candidate token string, including its type prefix.
    :return: ``True`` if the token has a known prefix and a plausible
        suffix, ``False`` otherwise.
    """
    # Match against recognised token prefixes and strip only the prefix,
    # keeping the rest of the token intact for validation below.
    token_suffix = None
    for token_type, token_prefix in TOKEN_PREFIXES.items():
        if token.startswith(token_prefix):
            logging.info(f"Found probable token type: {token_type}")
            token_suffix = token[len(token_prefix):]
            break
    # No recognised prefix (or nothing left after the prefix): reject.
    if not token_suffix:
        return False
    # Basic validation of the token suffix based on:
    # https://gitlab.com/gitlab-org/gitlab/-/blob/master/gems/gitlab-secret_detection/lib/gitleaks.toml
    if not re.match(r"(\w+-)?[0-9a-zA-Z_\-]{20,64}", token_suffix):
        return False
    return True
def get_token_from_arg(token_arg: str | Path | None) -> str | None: