chore: refurbish Python code by applying refurb linter rules #8296

Merged: 13 commits, Sep 12, 2024
24 changes: 8 additions & 16 deletions api/controllers/console/admin.py
@@ -60,23 +60,15 @@ def post(self):
 
         site = app.site
         if not site:
-            desc = args["desc"] if args["desc"] else ""
-            copy_right = args["copyright"] if args["copyright"] else ""
-            privacy_policy = args["privacy_policy"] if args["privacy_policy"] else ""
-            custom_disclaimer = args["custom_disclaimer"] if args["custom_disclaimer"] else ""
+            desc = args["desc"] or ""
+            copy_right = args["copyright"] or ""
+            privacy_policy = args["privacy_policy"] or ""
+            custom_disclaimer = args["custom_disclaimer"] or ""
         else:
-            desc = site.description if site.description else args["desc"] if args["desc"] else ""
-            copy_right = site.copyright if site.copyright else args["copyright"] if args["copyright"] else ""
-            privacy_policy = (
-                site.privacy_policy if site.privacy_policy else args["privacy_policy"] if args["privacy_policy"] else ""
-            )
-            custom_disclaimer = (
-                site.custom_disclaimer
-                if site.custom_disclaimer
-                else args["custom_disclaimer"]
-                if args["custom_disclaimer"]
-                else ""
-            )
+            desc = site.description or args["desc"] or ""
+            copy_right = site.copyright or args["copyright"] or ""
+            privacy_policy = site.privacy_policy or args["privacy_policy"] or ""
+            custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or ""
 
         recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args["app_id"]).first()
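Most hunks in this PR apply the same refurb simplification: `x if x else y` becomes `x or y` (refurb's FURB110; rule ID quoted from memory). The two forms are equivalent, both branch on the truthiness of `x`, but the `or` form evaluates `x` only once, which matters when `x` is a call such as `args.get("voice")`. A minimal sketch of the idiom and its one caveat:

```python
# Sketch of the `x if x else y` -> `x or y` rewrite applied throughout this PR.
# The helper name `coalesce` is illustrative, not part of the Dify codebase.
def coalesce(primary, fallback):
    # Before: primary if primary else fallback   (evaluates `primary` twice)
    # After:  primary or fallback                (evaluates `primary` once)
    return primary or fallback

assert coalesce("custom name", "Dify") == "custom name"
assert coalesce("", "Dify") == "Dify"    # empty string is falsy
assert coalesce(None, "Dify") == "Dify"  # so is None
assert coalesce(0, 42) == 42             # caveat: *any* falsy value selects the fallback
```

Because `or` chains left to right and short-circuits, the nested conditionals in the `else` branch above collapse to a single chain such as `site.description or args["desc"] or ""` with no change in behavior.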
8 changes: 2 additions & 6 deletions api/controllers/console/app/audio.py
@@ -99,14 +99,10 @@ def post(self, app_model):
             and app_model.workflow.features_dict
         ):
             text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-            voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+            voice = args.get("voice") or text_to_speech.get("voice")
         else:
             try:
-                voice = (
-                    args.get("voice")
-                    if args.get("voice")
-                    else app_model.app_model_config.text_to_speech_dict.get("voice")
-                )
+                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
             except Exception:
                 voice = None
         response = AudioService.transcript_tts(app_model=app_model, text=text, message_id=message_id, voice=voice)
2 changes: 1 addition & 1 deletion api/controllers/console/auth/oauth.py
@@ -101,7 +101,7 @@ def _generate_account(provider: str, user_info: OAuthUserInfo):
 
     if not account:
         # Create account
-        account_name = user_info.name if user_info.name else "Dify"
+        account_name = user_info.name or "Dify"
         account = RegisterService.register(
             email=user_info.email, name=account_name, password=None, open_id=user_info.id, provider=provider
         )
7 changes: 1 addition & 6 deletions api/controllers/console/datasets/datasets.py
@@ -550,12 +550,7 @@ class DatasetApiBaseUrlApi(Resource):
     @login_required
     @account_initialization_required
     def get(self):
-        return {
-            "api_base_url": (
-                dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL else request.host_url.rstrip("/")
-            )
-            + "/v1"
-        }
+        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
 
 
 class DatasetRetrievalSettingApi(Resource):
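One detail worth keeping in mind with this rewrite: the parentheses around the `or` expression are load-bearing, because `+` binds tighter than `or`. A small check with illustrative values (the URLs below are made up, not Dify defaults):

```python
# `or` has lower precedence than `+`, so without parentheses "/v1" would be
# appended to the fallback operand only.
SERVICE_API_URL = "https://api.example.com"  # hypothetical configured value
host_url = "http://localhost:5001/"

right = (SERVICE_API_URL or host_url.rstrip("/")) + "/v1"
wrong = SERVICE_API_URL or host_url.rstrip("/") + "/v1"

assert right == "https://api.example.com/v1"
assert wrong == "https://api.example.com"  # "/v1" silently lost to precedence
```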
8 changes: 2 additions & 6 deletions api/controllers/console/explore/audio.py
@@ -86,14 +86,10 @@ def post(self, installed_app):
             and app_model.workflow.features_dict
         ):
             text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-            voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+            voice = args.get("voice") or text_to_speech.get("voice")
         else:
             try:
-                voice = (
-                    args.get("voice")
-                    if args.get("voice")
-                    else app_model.app_model_config.text_to_speech_dict.get("voice")
-                )
+                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
             except Exception:
                 voice = None
         response = AudioService.transcript_tts(app_model=app_model, message_id=message_id, voice=voice, text=text)
2 changes: 1 addition & 1 deletion api/controllers/console/workspace/tool_providers.py
@@ -327,7 +327,7 @@ def post(self):
 
         return ApiToolManageService.test_api_tool_preview(
             current_user.current_tenant_id,
-            args["provider_name"] if args["provider_name"] else "",
+            args["provider_name"] or "",
             args["tool_name"],
             args["credentials"],
             args["parameters"],
8 changes: 2 additions & 6 deletions api/controllers/service_api/app/audio.py
@@ -84,14 +84,10 @@ def post(self, app_model: App, end_user: EndUser):
             and app_model.workflow.features_dict
         ):
             text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-            voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+            voice = args.get("voice") or text_to_speech.get("voice")
         else:
             try:
-                voice = (
-                    args.get("voice")
-                    if args.get("voice")
-                    else app_model.app_model_config.text_to_speech_dict.get("voice")
-                )
+                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
             except Exception:
                 voice = None
         response = AudioService.transcript_tts(
8 changes: 2 additions & 6 deletions api/controllers/web/audio.py
@@ -83,14 +83,10 @@ def post(self, app_model: App, end_user):
             and app_model.workflow.features_dict
         ):
             text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-            voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+            voice = args.get("voice") or text_to_speech.get("voice")
         else:
             try:
-                voice = (
-                    args.get("voice")
-                    if args.get("voice")
-                    else app_model.app_model_config.text_to_speech_dict.get("voice")
-                )
+                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
             except Exception:
                 voice = None
 
2 changes: 1 addition & 1 deletion api/core/agent/cot_agent_runner.py
@@ -256,7 +256,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     model=model_instance.model,
                     prompt_messages=prompt_messages,
                     message=AssistantPromptMessage(content=final_answer),
-                    usage=llm_usage["usage"] if llm_usage["usage"] else LLMUsage.empty_usage(),
+                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                     system_fingerprint="",
                 )
             ),
2 changes: 1 addition & 1 deletion api/core/agent/fc_agent_runner.py
@@ -298,7 +298,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     model=model_instance.model,
                     prompt_messages=prompt_messages,
                     message=AssistantPromptMessage(content=final_answer),
-                    usage=llm_usage["usage"] if llm_usage["usage"] else LLMUsage.empty_usage(),
+                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                     system_fingerprint="",
                 )
             ),
8 changes: 4 additions & 4 deletions api/core/app/apps/base_app_runner.py
@@ -161,7 +161,7 @@ def organize_prompt_messages(
                 app_mode=AppMode.value_of(app_record.mode),
                 prompt_template_entity=prompt_template_entity,
                 inputs=inputs,
-                query=query if query else "",
+                query=query or "",
                 files=files,
                 context=context,
                 memory=memory,
@@ -189,7 +189,7 @@ def organize_prompt_messages(
             prompt_messages = prompt_transform.get_prompt(
                 prompt_template=prompt_template,
                 inputs=inputs,
-                query=query if query else "",
+                query=query or "",
                 files=files,
                 context=context,
                 memory_config=memory_config,
@@ -238,7 +238,7 @@ def direct_output(
                     model=app_generate_entity.model_conf.model,
                     prompt_messages=prompt_messages,
                     message=AssistantPromptMessage(content=text),
-                    usage=usage if usage else LLMUsage.empty_usage(),
+                    usage=usage or LLMUsage.empty_usage(),
                 ),
             ),
             PublishFrom.APPLICATION_MANAGER,
@@ -351,7 +351,7 @@ def moderation_for_inputs(
             tenant_id=tenant_id,
             app_config=app_generate_entity.app_config,
             inputs=inputs,
-            query=query if query else "",
+            query=query or "",
             message_id=message_id,
             trace_manager=app_generate_entity.trace_manager,
         )
4 changes: 2 additions & 2 deletions api/core/extension/extensible.py
@@ -3,6 +3,7 @@
 import json
 import logging
 import os
+from pathlib import Path
 from typing import Any, Optional
 
 from pydantic import BaseModel
@@ -63,8 +64,7 @@ def scan_extensions(cls):
 
                 builtin_file_path = os.path.join(subdir_path, "__builtin__")
                 if os.path.exists(builtin_file_path):
-                    with open(builtin_file_path, encoding="utf-8") as f:
-                        position = int(f.read().strip())
+                    position = int(Path(builtin_file_path).read_text(encoding="utf-8").strip())
                     position_map[extension_name] = position
 
                 if (extension_name + ".py") not in file_names:
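The second hunk is refurb's pathlib suggestion (FURB101, if memory serves): a `with open(...)` block whose only job is a single `read()` becomes one `Path.read_text()` call, which still opens and closes the file. A self-contained sketch:

```python
from pathlib import Path
import tempfile

# Path.read_text() opens, reads, and closes the file in one call, matching the
# replaced `with open(...) as f: position = int(f.read().strip())` block.
builtin_file = Path(tempfile.mkdtemp()) / "__builtin__"
builtin_file.write_text("3\n", encoding="utf-8")

position = int(builtin_file.read_text(encoding="utf-8").strip())
assert position == 3
```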
2 changes: 1 addition & 1 deletion api/core/memory/token_buffer_memory.py
@@ -39,7 +39,7 @@ def get_history_prompt_messages(
         )
 
         if message_limit and message_limit > 0:
-            message_limit = message_limit if message_limit <= 500 else 500
+            message_limit = min(message_limit, 500)
         else:
             message_limit = 500
 
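This one is refurb's min/max rewrite (likely FURB136): a conditional that caps a value from above is exactly `min`, and the mirror image that enforces a floor is `max`. For instance:

```python
# `x if x <= cap else cap` caps from above, i.e. min(x, cap);
# `x if x >= floor else floor` caps from below, i.e. max(x, floor).
def effective_limit(message_limit, cap=500):
    return min(message_limit, cap)

assert effective_limit(100) == 100   # under the cap: unchanged
assert effective_limit(9999) == 500  # over the cap: clamped
```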
@@ -449,7 +449,7 @@ def _invoke_result_generator(
                     model=real_model,
                     prompt_messages=prompt_messages,
                     message=prompt_message,
-                    usage=usage if usage else LLMUsage.empty_usage(),
+                    usage=usage or LLMUsage.empty_usage(),
                     system_fingerprint=system_fingerprint,
                 ),
                 credentials=credentials,
@@ -409,7 +409,7 @@ def _handle_chat_generate_stream_response(
                     ),
                 )
             elif isinstance(chunk, ContentBlockDeltaEvent):
-                chunk_text = chunk.delta.text if chunk.delta.text else ""
+                chunk_text = chunk.delta.text or ""
                 full_assistant_content += chunk_text
 
                 # transform assistant message to prompt message
@@ -213,7 +213,7 @@ def _invoke_result_generator(
                     model=real_model,
                     prompt_messages=prompt_messages,
                     message=prompt_message,
-                    usage=usage if usage else LLMUsage.empty_usage(),
+                    usage=usage or LLMUsage.empty_usage(),
                     system_fingerprint=system_fingerprint,
                 ),
                 credentials=credentials,
10 changes: 4 additions & 6 deletions api/core/model_runtime/model_providers/azure_openai/llm/llm.py
@@ -225,7 +225,7 @@ def _handle_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            text = delta.text if delta.text else ""
+            text = delta.text or ""
             assistant_prompt_message = AssistantPromptMessage(content=text)
 
             full_text += text
@@ -400,15 +400,13 @@ def _handle_chat_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-            )
+            assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             real_model = chunk.model
             system_fingerprint = chunk.system_fingerprint
-            completion += delta.delta.content if delta.delta.content else ""
+            completion += delta.delta.content or ""
 
             yield LLMResultChunk(
                 model=real_model,
@@ -84,7 +84,7 @@ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str
                     )
                     for i in range(len(sentences))
                 ]
-                for index, future in enumerate(futures):
+                for future in futures:
                     yield from future.result().__enter__().iter_bytes(1024)
 
             else:
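Here and in the embeddings hunk further down, refurb drops `enumerate()` because the index it produces is never read (its ignored-enumerate check; rule ID not verified). Iteration order is unchanged:

```python
sentences = ["first.", "second."]

# Before: the loop binds an index it never reads.
for index, sentence in enumerate(sentences):
    print(sentence)

# After: iterate directly; same elements, same order, no dead variable.
for sentence in sentences:
    print(sentence)
```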
8 changes: 4 additions & 4 deletions api/core/model_runtime/model_providers/bedrock/llm/llm.py
@@ -331,10 +331,10 @@ def _handle_converse_stream_response(
             elif "contentBlockDelta" in chunk:
                 delta = chunk["contentBlockDelta"]["delta"]
                 if "text" in delta:
-                    chunk_text = delta["text"] if delta["text"] else ""
+                    chunk_text = delta["text"] or ""
                     full_assistant_content += chunk_text
                     assistant_prompt_message = AssistantPromptMessage(
-                        content=chunk_text if chunk_text else "",
+                        content=chunk_text or "",
                     )
                     index = chunk["contentBlockDelta"]["contentBlockIndex"]
                     yield LLMResultChunk(
@@ -751,7 +751,7 @@ def _handle_generate_response(
         elif model_prefix == "cohere":
             output = response_body.get("generations")[0].get("text")
             prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, output if output else "")
+            completion_tokens = self.get_num_tokens(model, credentials, output or "")
 
         else:
             raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response")
@@ -828,7 +828,7 @@ def _handle_generate_stream_response(
 
                 # transform assistant message to prompt message
                 assistant_prompt_message = AssistantPromptMessage(
-                    content=content_delta if content_delta else "",
+                    content=content_delta or "",
                 )
                 index += 1
 
4 changes: 2 additions & 2 deletions api/core/model_runtime/model_providers/chatglm/llm/llm.py
@@ -302,11 +302,11 @@ def _handle_chat_generate_stream_response(
             if delta.delta.function_call:
                 function_calls = [delta.delta.function_call]
 
-            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
 
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
             )
 
             if delta.finish_reason is not None:
6 changes: 3 additions & 3 deletions api/core/model_runtime/model_providers/localai/llm/llm.py
@@ -511,7 +511,7 @@ def _handle_completion_generate_stream_response(
             delta = chunk.choices[0]
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(content=delta.text if delta.text else "", tool_calls=[])
+            assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[])
 
             if delta.finish_reason is not None:
                 # temp_assistant_prompt_message is used to calculate usage
@@ -578,11 +578,11 @@ def _handle_chat_generate_stream_response(
             if delta.delta.function_call:
                 function_calls = [delta.delta.function_call]
 
-            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
 
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
             )
 
             if delta.finish_reason is not None:
4 changes: 2 additions & 2 deletions api/core/model_runtime/model_providers/minimax/llm/llm.py
@@ -211,7 +211,7 @@ def _handle_chat_generate_stream_response(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
                     usage=usage,
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
         elif message.function_call:
@@ -244,7 +244,7 @@ def _handle_chat_generate_stream_response(
                 delta=LLMResultChunkDelta(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
 
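A subtle variant worth calling out: `message.stop_reason or None` is not redundant. Like the conditional it replaces, it normalizes every falsy stop reason (notably the empty string) to `None`:

```python
def normalized_finish_reason(stop_reason):
    # "", 0, [], and None all become None; truthy values pass through unchanged.
    return stop_reason or None

assert normalized_finish_reason("stop") == "stop"
assert normalized_finish_reason("") is None
assert normalized_finish_reason(None) is None
```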
@@ -65,7 +65,7 @@ def _invoke(
         inputs = []
         used_tokens = 0
 
-        for i, text in enumerate(texts):
+        for text in texts:
             # Here token count is only an approximation based on the GPT2 tokenizer
             num_tokens = self._get_num_tokens_by_gpt2(text)
 
8 changes: 3 additions & 5 deletions api/core/model_runtime/model_providers/openai/llm/llm.py
@@ -508,7 +508,7 @@ def _handle_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            text = delta.text if delta.text else ""
+            text = delta.text or ""
             assistant_prompt_message = AssistantPromptMessage(content=text)
 
             full_text += text
@@ -760,11 +760,9 @@ def _handle_chat_generate_stream_response(
                 final_tool_calls.extend(tool_calls)
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-            )
+            assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             if has_finish_reason:
                 final_chunk = LLMResultChunk(