{"payload":{"header_redesign_enabled":false,"results":[{"id":"324433599","archived":false,"color":"#DA5B0B","followers":1,"has_funding_file":false,"hl_name":"datatrigger/interpretable_machine_learning","hl_trunc_description":"Getting explanations for predictions made by black box models.","language":"Jupyter Notebook","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":324433599,"name":"interpretable_machine_learning","owner_id":63914607,"owner_login":"datatrigger","updated_at":"2021-01-24T17:05:07.465Z","has_issues":true}},"sponsorable":false,"topics":["lime","interpretability","black-box-model","feature-importance","interpretable-ml","surrogate-models","interpretable-machine-learning","shap","explainability","explainable-machine-learning","shapley-additive-explanations"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":50,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Adatatrigger%252Finterpretable_machine_learning%2B%2Blanguage%253A%2522Jupyter%2BNotebook%2522","metadata":null,"csrf_tokens":{"/datatrigger/interpretable_machine_learning/star":{"post":"i4-gjb3y4M2xFNa9VRJ8fzi3cVVpAZwDUXv6RcegpXVP3tELQLf6k1-lAOuK9WM4jsymk5d46fUm7-3HfIpWyQ"},"/datatrigger/interpretable_machine_learning/unstar":{"post":"9gm6pgH9NzE5d7_YiI_SyXdr4EGCyHXULl9n3ZW-saTmIORYRmn91DfDxzZlRDkl7pAa6soOej0l95QeHVkJ5Q"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"TYwTMFMNFj22Ihyk07JAadcd5p1xowPaW9AT3sqNPuWtrcmZQBcvOdvRo9aa1pBda_-k69LA-OT4DwlcfRLrRw"}}},"title":"Repository search results"}