from django.contrib import admin
from django.utils.safestring import mark_safe
from django.urls import path
from .views import eval_download_results, config_download_results
from .models import (
    QA, Dataset, EvalAnswer,
    LLMBackend, LLMModel,
    EvalConfig, RoleMessage, EvalSession,
    AnswerInterpreter,
)
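
# Admin configuration for the evaluation app: datasets and QA items, LLM backends
# and models, evaluation configs/sessions, role messages, and answer interpreters.
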
class QAAdmin(admin.ModelAdmin):
    list_display = ('question', 'xid', 'dataset', 'category', 'correct_answer', 'target')
    list_filter = ('target', 'dataset', 'category')
    search_fields = ('question', 'correct_answer', 'category', 'extra_info')
    fieldsets = (
        (None, {'fields': ('dataset', 'question', 'category', 'extra_info', 'context', 'options', 'correct_answer', 'correct_answer_idx', 'target')}),
    )
    readonly_fields = ('hash',)

admin.site.register(QA, QAAdmin)


class DatasetAdmin(admin.ModelAdmin):
    list_display = ('name', 'description')
    search_fields = ('name', 'description')
    ordering = ('-created_at',)
    fieldsets = (
        (None, {'fields': ('name', 'description')}),
    )

admin.site.register(Dataset, DatasetAdmin)

class EvalAnswerAdmin(admin.ModelAdmin):
    list_display = ('question', 'get_question_id', 'is_correct', 'llm_model', 'eval_session')
    list_filter = ('is_correct', 'llm_backend', 'llm_model', 'question__dataset', 'eval_session', 'eval_session__config')
    # 'question' is a ForeignKey, so search has to traverse to the QA text field.
    search_fields = ('question__question', 'instruction', 'assistant_answer')
    ordering = ('-created_at', 'question')

    @admin.display(description='Question ID', ordering='question__id')
    def get_question_id(self, obj):
        return obj.question.id

admin.site.register(EvalAnswer, EvalAnswerAdmin)

class LLMBackendAdmin(admin.ModelAdmin):
    list_display = ('name',)
    search_fields = ('name',)

admin.site.register(LLMBackend, LLMBackendAdmin)


class LLMModelAdmin(admin.ModelAdmin):
    list_display = ('name', 'backend')
    list_filter = ('backend',)
    search_fields = ('name',)

admin.site.register(LLMModel, LLMModelAdmin)


class RoleMessageAdmin(admin.ModelAdmin):
    list_display = ('role', 'eval_config')
    list_filter = ('role', 'eval_config')
    search_fields = ('role', 'content')

admin.site.register(RoleMessage, RoleMessageAdmin)


class RoleMessageInline(admin.TabularInline):
    model = RoleMessage
    extra = 3
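
# The two admins below add a per-object "Download Results" link and register the
# matching download view (config_download_results / eval_download_results) on the
# admin URLconf via get_urls().
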
class EvalConfigAdmin(admin.ModelAdmin):
    list_display = ('name', 'dataset', 'created_at', 'link')
    search_fields = ('name', 'description')
    ordering = ('-created_at',)
    inlines = [RoleMessageInline]

    @admin.display(description='Link')
    def link(self, obj):
        # The anchor markup was lost from the original source; the query parameter
        # name here is an assumption about what config_download_results expects.
        return mark_safe(f'<a href="config_download_results/?config_id={obj.id}">Download Results</a>')

    def get_urls(self):
        urls = super().get_urls()
        my_urls = [
            # No leading '/': the route is resolved relative to this admin's changelist URL.
            path('config_download_results/', config_download_results, name='config_download_results'),
        ]
        return my_urls + urls

admin.site.register(EvalConfig, EvalConfigAdmin)

class EvalSessionAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'is_active', 'config', 'llm_model', 'progress', 'accuracy', 'link')
    list_filter = ('is_active', 'config', 'llm_model')
    list_display_links = ('name',)
    # 'config' and 'llm_model' are ForeignKeys, so search has to traverse to their name fields.
    search_fields = ('name', 'config__name', 'llm_model__name')
    ordering = ('-created_at',)

    @admin.display(description='Link')
    def link(self, obj):
        # The anchor markup was lost from the original source; the query parameter
        # name here is an assumption about what eval_download_results expects.
        return mark_safe(f'<a href="eval_download_results/?session_id={obj.id}">Download Results</a>')

    def get_urls(self):
        urls = super().get_urls()
        my_urls = [
            # No leading '/': the route is resolved relative to this admin's changelist URL.
            path('eval_download_results/', eval_download_results, name='eval_download_results'),
        ]
        return my_urls + urls

    def accuracy(self, obj):
        return "{:.2%}".format(obj.accuracy)

    def progress(self, obj):
        # Answered count vs. total QA items in the session's dataset split.
        total_counts_answered = obj.evalanswer_set.count()
        total_counts = QA.objects.filter(dataset=obj.config.dataset).filter(target=obj.dataset_target).count()
        return "{}/{}".format(total_counts_answered, total_counts)

admin.site.register(EvalSession, EvalSessionAdmin)

class AnswerInterpreterAdmin(admin.ModelAdmin):
    list_display = ('name', 'llm_model')
    search_fields = ('name', 'llm_model__name')
    ordering = ('-created_at',)

admin.site.register(AnswerInterpreter, AnswerInterpreterAdmin)