[{"data":1,"prerenderedAt":1082},["ShallowReactive",2],{"i-lucide:menu":3,"i-lucide:x":8,"i-kinnu:logo":10,"i-kinnu:origami-folding":13,"blog-category-research":17,"i-lucide:chevron-right":1080},{"left":4,"top":4,"width":5,"height":5,"rotate":4,"vFlip":6,"hFlip":6,"body":7},0,24,false,"\u003Cpath fill=\"none\" stroke=\"currentColor\" stroke-linecap=\"round\" stroke-linejoin=\"round\" stroke-width=\"2\" d=\"M4 5h16M4 12h16M4 19h16\"/>",{"left":4,"top":4,"width":5,"height":5,"rotate":4,"vFlip":6,"hFlip":6,"body":9},"\u003Cpath fill=\"none\" stroke=\"currentColor\" stroke-linecap=\"round\" stroke-linejoin=\"round\" stroke-width=\"2\" d=\"M18 6L6 18M6 6l12 12\"/>",{"left":4,"top":4,"width":11,"height":11,"rotate":4,"vFlip":6,"hFlip":6,"body":12},27,"\u003Cg fill=\"none\">\u003Cpath d=\"M0.046875 1.05555C0.046875 1.03541 0.048197 1.01579 0.0507438 0.996728C0.0987149 0.438619 0.586845 0 1.18194 0H25.4398C26.451 0 26.9575 1.171 26.2424 1.85585L15.7301 11.9243L1.31574 0.903476C1.17475 0.79568 1.01137 0.761884 0.859586 0.784111L26.2936 25.1441C27.0086 25.829 26.5022 27 25.4909 27H1.18194C0.555061 27 0.046875 26.5133 0.046875 25.9129V1.05555Z\" fill=\"currentColor\"/>\u003C/g>",{"left":4,"top":4,"width":14,"height":15,"rotate":4,"vFlip":6,"hFlip":6,"body":16},1000,236,"\u003Cg fill=\"none\">\u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M193.68 38.2238C195.994 38.2238 197.87 40.0989 197.87 42.412V231.812C197.87 234.125 195.994 236 193.68 236H4.19013C1.87603 236 2.02305e-07 234.125 0 231.812V42.412C-2.02305e-07 40.0989 1.87603 38.2238 4.19013 38.2238H193.68ZM111.76 89.0072C111.685 87.9474 110.572 87.2905 109.608 87.7376L96.8872 93.641C95.7786 94.1554 95.702 95.7016 96.7545 96.3225L101.579 99.167C94.7045 109.365 90.5733 122.892 90.5732 137.642C90.5733 154.323 95.8569 169.439 104.416 179.945C105.301 181.032 106.9 181.196 107.987 180.311C109.075 179.426 109.238 177.828 108.353 176.741C100.621 167.25 95.6522 153.305 95.6521 137.642C95.6522 123.661 99.6138 111.051 
105.963 101.754L110.456 104.403C111.508 105.024 112.826 104.21 112.74 102.991L111.76 89.0072ZM9.63194 136.286C9.14864 136.286 8.75684 136.678 8.75684 137.161C8.7569 137.644 9.14868 138.035 9.63194 138.035H17.2161C17.6993 138.035 18.0912 137.644 18.0912 137.161C18.0912 136.678 17.6994 136.286 17.2161 136.286H9.63194ZM22.6813 136.286C22.198 136.286 21.8062 136.678 21.8062 137.161C21.8063 137.644 22.1981 138.035 22.6813 138.035H30.2655C30.7487 138.035 31.1406 137.644 31.1406 137.161C31.1406 136.678 30.7488 136.286 30.2655 136.286H22.6813ZM35.7464 136.286C35.2631 136.286 34.8713 136.678 34.8713 137.161C34.8713 137.644 35.2631 138.035 35.7464 138.035H44.4973C44.9805 138.035 45.3724 137.644 45.3724 137.161C45.3724 136.678 44.9806 136.286 44.4973 136.286H35.7464ZM49.9977 136.286C49.5144 136.286 49.1226 136.678 49.1226 137.161C49.1226 137.644 49.5144 138.035 49.9977 138.035H57.5819C58.0651 138.035 58.4569 137.644 58.457 137.161C58.457 136.678 58.0651 136.286 57.5819 136.286H49.9977ZM63.0783 136.286C62.595 136.286 62.2032 136.678 62.2032 137.161C62.2033 137.644 62.5951 138.035 63.0783 138.035H70.6625C71.1457 138.035 71.5375 137.644 71.5376 137.161C71.5376 136.678 71.1457 136.286 70.6625 136.286H63.0783ZM76.1277 136.286C75.6444 136.286 75.2526 136.678 75.2526 137.161C75.2527 137.644 75.6445 138.035 76.1277 138.035H83.7119C84.1951 138.035 84.5869 137.644 84.587 137.161C84.587 136.678 84.1951 136.286 83.7119 136.286H76.1277ZM102.266 136.286C101.782 136.286 101.39 136.678 101.39 137.161C101.391 137.644 101.782 138.035 102.266 138.035H109.85C110.333 138.035 110.725 137.644 110.725 137.161C110.725 136.678 110.333 136.286 109.85 136.286H102.266ZM115.338 136.286C114.855 136.286 114.463 136.678 114.463 137.161C114.463 137.644 114.855 138.035 115.338 138.035H122.923C123.406 138.035 123.798 137.644 123.798 137.161C123.798 136.678 123.406 136.286 122.923 136.286H115.338ZM128.403 136.286C127.92 136.286 127.528 136.678 127.528 137.161C127.528 137.644 127.92 138.035 128.403 
138.035H135.988C136.471 138.035 136.863 137.644 136.863 137.161C136.863 136.678 136.471 136.286 135.988 136.286H128.403ZM141.468 136.286C140.985 136.286 140.593 136.678 140.593 137.161C140.593 137.644 140.985 138.035 141.468 138.035H149.053C149.536 138.035 149.928 137.644 149.928 137.161C149.928 136.678 149.536 136.286 149.053 136.286H141.468ZM154.541 136.286C154.058 136.286 153.666 136.678 153.666 137.161C153.666 137.644 154.058 138.035 154.541 138.035H162.125C162.609 138.035 163 137.644 163.001 137.161C163.001 136.678 162.609 136.286 162.125 136.286H154.541ZM167.614 136.286C167.131 136.286 166.739 136.678 166.739 137.161C166.739 137.644 167.131 138.035 167.614 138.035H175.198C175.681 138.035 176.073 137.644 176.073 137.161C176.073 136.678 175.681 136.286 175.198 136.286H167.614ZM180.671 136.286C180.188 136.286 179.796 136.678 179.796 137.161C179.796 137.644 180.188 138.035 180.671 138.035H188.255C188.739 138.035 189.13 137.644 189.131 137.161C189.131 136.678 188.739 136.286 188.255 136.286H180.671Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M444.85 38.2277C447.164 38.2277 449.04 40.1028 449.04 42.4159V132.928C449.04 135.241 447.164 137.116 444.85 137.116H255.36C253.046 137.116 251.17 135.241 251.17 132.928V42.4159C251.17 40.1028 253.046 38.2277 255.36 38.2277H444.85ZM361.96 125.388C361.618 125.046 361.064 125.046 360.722 125.388L354.534 131.572C354.192 131.914 354.192 132.468 354.534 132.81C354.876 133.151 355.43 133.151 355.772 132.81L361.96 126.624C362.301 126.283 362.301 125.73 361.96 125.388ZM371.047 116.311C370.705 115.969 370.15 115.969 369.809 116.311L364.446 121.671C364.104 122.012 364.104 122.567 364.446 122.908C364.788 123.249 365.342 123.25 365.684 122.908L371.047 117.548C371.388 117.207 371.388 116.652 371.047 116.311ZM380.124 107.246C379.782 106.904 379.227 106.904 378.885 107.246L373.523 112.606C373.181 112.948 373.181 113.502 373.523 113.844C373.864 114.185 374.419 114.185 374.761 
113.844L380.124 108.483C380.465 108.142 380.465 107.587 380.124 107.246ZM385.736 65.8841C385.891 64.6727 384.622 63.7845 383.536 64.3434L371.069 70.7636C370.124 71.2504 369.96 72.5334 370.752 73.2424L381.2 82.5938C382.11 83.4081 383.561 82.8672 383.717 81.6557L384.393 76.3725C391.143 77.1933 398.567 80.7709 404.771 86.9711C411.124 93.3213 414.726 100.952 415.43 107.827C415.573 109.221 416.819 110.236 418.214 110.093C419.609 109.95 420.624 108.703 420.481 107.309C419.644 99.1317 415.435 90.4514 408.362 83.3817C401.466 76.489 393.038 72.3185 385.038 71.338L385.736 65.8841ZM389.2 98.1733C388.859 97.8319 388.304 97.8318 387.962 98.1733L382.6 103.534C382.258 103.875 382.258 104.429 382.6 104.771C382.941 105.112 383.496 105.112 383.838 104.771L389.2 99.4108C389.542 99.0693 389.542 98.5149 389.2 98.1733ZM398.262 89.1047C397.92 88.7633 397.365 88.7632 397.024 89.1047L391.661 94.4649C391.319 94.8065 391.319 95.3608 391.661 95.7024C392.002 96.0436 392.557 96.0438 392.899 95.7024L398.262 90.3421C398.603 90.0007 398.603 89.4463 398.262 89.1047ZM416.431 70.9616C416.089 70.6202 415.534 70.6201 415.193 70.9616L409.83 76.3218C409.488 76.6634 409.488 77.2177 409.83 77.5592C410.172 77.9005 410.726 77.9007 411.068 77.5592L416.431 72.199C416.772 71.8575 416.772 71.3032 416.431 70.9616ZM425.508 61.891C425.166 61.5496 424.611 61.5495 424.27 61.891L418.907 67.2512C418.565 67.5928 418.565 68.1471 418.907 68.4887C419.249 68.8299 419.803 68.8301 420.145 68.4887L425.508 63.1284C425.849 62.787 425.849 62.2326 425.508 61.891ZM434.569 52.8146C434.227 52.4731 433.673 52.4731 433.331 52.8146L427.968 58.1748C427.626 58.5163 427.627 59.0706 427.968 59.4122C428.31 59.7534 428.864 59.7537 429.206 59.4122L434.569 54.052C434.91 53.7105 434.91 53.1562 434.569 52.8146ZM443.638 43.7479C443.296 43.4065 442.742 43.4064 442.4 43.7479L437.037 49.1081C436.695 49.4496 436.696 50.004 437.037 50.3455C437.379 50.6868 437.933 50.687 438.275 50.3455L443.638 44.9853C443.98 44.6438 443.979 44.0895 443.638 43.7479Z\"\n 
   fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M684.066 38.2277C687.798 38.2281 689.667 42.7391 687.027 45.3773L596.473 135.889C595.687 136.675 594.621 137.116 593.51 137.116H506.335C504.021 137.116 502.145 135.241 502.145 132.928V42.4159C502.145 40.1028 504.021 38.2277 506.335 38.2277H684.066ZM514.603 124.566C514.261 124.224 513.707 124.224 513.365 124.566L507.178 130.751C506.836 131.093 506.836 131.646 507.178 131.988C507.519 132.329 508.073 132.329 508.415 131.988L514.603 125.803C514.945 125.462 514.945 124.908 514.603 124.566ZM523.689 115.491C523.348 115.15 522.794 115.15 522.452 115.491L517.09 120.852C516.748 121.193 516.748 121.747 517.09 122.088C517.431 122.43 517.985 122.43 518.327 122.088L523.689 116.728C524.031 116.386 524.031 115.833 523.689 115.491ZM532.102 65.8295C530.707 65.6872 529.46 66.7017 529.318 68.0957C529.175 69.4896 530.189 70.7355 531.584 70.8787C538.463 71.5825 546.096 75.1826 552.45 81.5329C558.723 87.8037 562.312 95.3226 563.079 102.13L557.738 102.392C556.518 102.452 555.865 103.855 556.607 104.827L565.115 115.969C565.76 116.814 567.051 116.751 567.611 115.847L574.992 103.928C575.635 102.889 574.848 101.555 573.628 101.615L568.161 101.882C568.161 101.878 568.162 101.874 568.161 101.871C567.324 93.6931 563.114 85.0124 556.041 77.9425C548.968 70.873 540.283 66.6668 532.102 65.8295ZM532.766 106.421C532.425 106.079 531.871 106.079 531.529 106.421L526.166 111.781C525.825 112.123 525.825 112.676 526.166 113.018C526.508 113.359 527.062 113.359 527.403 113.018L532.766 107.657C533.108 107.316 533.108 106.762 532.766 106.421ZM541.843 97.3445C541.501 97.003 540.948 97.003 540.606 97.3445L535.243 102.705C534.901 103.046 534.902 103.6 535.243 103.941C535.585 104.283 536.139 104.283 536.48 103.941L541.843 98.5809C542.185 98.2393 542.185 97.686 541.843 97.3445ZM550.92 88.2778C550.578 87.9363 550.025 87.9363 549.683 88.2778L544.32 93.638C543.978 93.9796 543.978 94.5329 544.32 94.8745C544.662 95.2161 545.215 
95.2161 545.557 94.8745L550.92 89.5142C551.262 89.1727 551.262 88.6193 550.92 88.2778ZM569.066 70.1405C568.724 69.799 568.17 69.7991 567.829 70.1405L562.466 75.5008C562.124 75.8423 562.124 76.3956 562.466 76.7372C562.808 77.0788 563.361 77.0788 563.703 76.7372L569.066 71.377C569.407 71.0354 569.407 70.4821 569.066 70.1405ZM578.143 61.0699C577.801 60.7284 577.247 60.7285 576.906 61.0699L571.543 66.4302C571.201 66.7717 571.201 67.3251 571.543 67.6666C571.885 68.0082 572.438 68.0082 572.78 67.6666L578.143 62.3064C578.484 61.9648 578.484 61.4115 578.143 61.0699ZM587.219 51.9896C586.878 51.6481 586.324 51.6481 585.982 51.9896L580.62 57.3498C580.278 57.6914 580.278 58.2447 580.62 58.5863C580.961 58.9279 581.515 58.9279 581.857 58.5863L587.219 53.2261C587.561 52.8845 587.561 52.3312 587.219 51.9896ZM596.288 42.9249C595.947 42.5833 595.392 42.5833 595.05 42.9249L589.689 48.2851C589.347 48.6267 589.347 49.18 589.689 49.5216C590.03 49.863 590.584 49.8631 590.926 49.5216L596.288 44.1613C596.63 43.8198 596.63 43.2664 596.288 42.9249Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M850.814 38.2277C854.547 38.2281 856.416 42.739 853.777 45.3773L763.223 135.889C762.437 136.674 761.371 137.116 760.26 137.116H673.176C669.443 137.116 667.574 132.605 670.213 129.966L760.768 39.4544C761.554 38.6692 762.62 38.2277 763.731 38.2277H850.814ZM761.338 121.8C760.855 121.8 760.463 122.191 760.463 122.674V131.13H762.213V122.674C762.213 122.191 761.821 121.8 761.338 121.8ZM761.338 108.971C760.855 108.971 760.463 109.363 760.463 109.846V118.301H762.213V109.846C762.213 109.363 761.821 108.971 761.338 108.971ZM761.338 96.1402C760.855 96.1406 760.463 96.5321 760.463 97.0149V105.47H762.213V97.0149C762.213 96.532 761.821 96.1404 761.338 96.1402ZM782.263 71.887C781.043 71.951 780.395 73.3571 781.139 74.3257L784.474 78.6631C779.115 82.951 771.242 85.7443 762.35 85.7444C753.366 85.7442 745.421 82.8944 740.059 78.5305C738.972 77.6461 737.373 77.8099 
736.488 78.8961C735.602 79.983 735.766 81.582 736.853 82.467C743.231 87.6574 752.348 90.8207 762.35 90.8209C772.209 90.8208 781.205 87.746 787.568 82.6884L790.833 86.9341C791.577 87.9025 793.103 87.6391 793.479 86.4767L797.791 73.138C798.118 72.127 797.33 71.1017 796.268 71.1566L782.263 71.887ZM761.338 70.4847C760.855 70.4851 760.463 70.8767 760.463 71.3594V79.8147H762.213V71.3594C762.213 70.8766 761.821 70.485 761.338 70.4847ZM761.338 57.656C760.855 57.6564 760.463 58.048 760.463 58.5307V66.986H762.213V58.5307C762.213 58.0479 761.821 57.6563 761.338 57.656ZM761.338 44.8293C760.855 44.8297 760.463 45.2212 760.463 45.704V54.1592H762.213V45.704C762.213 45.2211 761.821 44.8295 761.338 44.8293Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M995.759 38.2277C999.53 38.228 1001.42 42.5171 998.752 45.0253L959.55 81.9005L905.796 41.5363C905.271 41.1418 904.662 41.0182 904.096 41.0994L997.485 130.319C1000.15 132.828 998.262 137.116 994.491 137.116H905.298C902.96 137.116 901.065 135.333 901.065 133.134V42.0941C901.065 42.0204 901.07 41.9483 901.079 41.8786C901.258 39.8345 903.079 38.2277 905.298 38.2277H995.759Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M505.873 0C506.657 4.57042e-05 507.307 0.195499 507.823 0.587023C508.338 0.969046 508.596 1.53802 508.596 2.29251C508.596 2.76034 508.467 3.19015 508.209 3.58162C507.951 3.96344 507.497 4.26401 506.848 4.48361V4.54114C507.65 4.67487 508.205 4.96191 508.51 5.4012C508.816 5.83087 508.969 6.31772 508.969 6.86193C508.969 7.74056 508.672 8.41851 508.08 8.89604C507.497 9.38304 506.733 9.62731 505.787 9.62738C504.861 9.62738 504.158 9.42172 503.68 9.0111C503.212 8.60054 502.935 8.08005 502.849 7.44993L503.881 7.10571L503.924 7.24028C504.035 7.54934 504.211 7.82925 504.454 8.07986C504.731 8.36635 505.166 8.50986 505.758 8.50989C506.465 8.50989 506.943 8.32772 507.191 7.9648C507.449 7.6019 507.579 7.20078 507.579 6.7615C507.579 6.2173 507.378 5.80683 506.977 5.52992C506.585 5.25295 505.93 5.10026 505.013 
5.07161V4.15402C505.901 4.12537 506.489 3.92484 506.776 3.55237C507.062 3.18009 507.206 2.82242 507.206 2.47876C507.206 1.62801 506.752 1.17539 505.845 1.12237L505.658 1.11749C505.467 1.11752 505.242 1.14605 504.985 1.2033C504.736 1.25105 504.511 1.3274 504.31 1.43245L504.081 2.56457L503.05 2.44951L503.322 0.687461C503.666 0.49653 504.068 0.33454 504.526 0.200875C504.985 0.0671945 505.434 0 505.873 0Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M905.727 2.30616L904.638 2.4066L904.466 1.26083H901.428V3.72497C901.533 3.71544 901.643 3.71034 901.757 3.71034H902.086C902.755 3.71034 903.386 3.78668 903.979 3.93949C904.58 4.09229 905.068 4.38363 905.44 4.8132C905.822 5.23335 906.014 5.84949 906.014 6.66106C906.014 7.64468 905.722 8.38068 905.14 8.86776C904.557 9.36434 903.783 9.6127 902.818 9.61275C901.91 9.61275 901.213 9.40711 900.725 8.99648C900.248 8.59544 899.96 8.08007 899.865 7.44993L900.911 7.10571C901.007 7.49723 901.203 7.8271 901.499 8.09449C901.795 8.37131 902.211 8.50985 902.746 8.50989C903.395 8.50989 903.869 8.33787 904.165 7.99405C904.461 7.65981 904.609 7.22507 904.609 6.69031C904.609 5.87861 904.337 5.3625 903.792 5.14279C903.248 4.91361 902.612 4.79958 901.886 4.79955C901.695 4.79955 901.489 4.80365 901.27 4.8132C901.059 4.82275 900.854 4.83701 900.653 4.85611L900.224 4.44071V0.143343H905.569L905.727 2.30616Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M765.49 6.04576H766.966L766.837 7.14862H765.49V9.48404H764.185V7.14862H759.857L759.713 6.04576L762.909 0.143343H765.49V6.04576ZM760.96 6.04576H764.185V1.26083H763.541L760.96 6.04576Z\"\n    fill=\"currentColor\" />\n  \u003Cpath d=\"M4.80573 6.47481H6.41154V7.60693H1.81068V6.47481H3.50235V1.27546H1.81068V0.143343H4.80573V6.47481Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M254.359 0C255.353 0 256.055 0.239186 256.466 0.716715C256.877 1.18447 257.083 1.68072 257.083 2.20573C257.083 2.85516 256.849 3.44346 256.38 3.96875C255.912 
4.49397 255.348 4.96638 254.689 5.38657C254.039 5.79717 253.437 6.15968 252.883 6.47481H256.423L256.538 5.42948L257.599 5.51529L257.426 7.60693H251.407L251.292 6.58987C252.582 5.73032 253.638 4.98523 254.46 4.35489C255.281 3.71509 255.693 3.05632 255.693 2.37832C255.693 1.53787 255.166 1.11749 254.115 1.12237L254.115 1.11749C253.924 1.11754 253.695 1.14604 253.427 1.2033C253.16 1.25104 252.916 1.32238 252.697 1.41783L252.467 2.47876L251.45 2.3637L251.707 0.60165C252.118 0.401088 252.563 0.253475 253.041 0.15797C253.519 0.0529708 253.958 1.99446e-05 254.359 0Z\"\n    fill=\"currentColor\" />\u003C/g>",[18,350,675,878],{"id":19,"title":20,"body":21,"date":339,"description":340,"extension":341,"isKinnuverse":6,"meta":342,"navigation":343,"path":344,"seo":345,"sitemap":346,"stem":347,"thumbnail":348,"__hash__":349},"blog/blog/research/what-we-found-in-our-10000-person-learning-experiments.md","What we found in our 10,000-person learning experiments",{"type":22,"value":23,"toc":328},"minimark",[24,28,31,36,47,50,58,61,69,73,80,87,90,97,104,114,117,126,129,132,137,140,143,148,151,154,160,166,169,174,177,180,185,188,191,198,205,211,217,220,225,228,231,236,239,255,261,267,273,276,281,284,289,292,297,304,310,321],[25,26,27],"p",{},"We’ve spent the best part of two years building an app that harnesses cognitive science to superpower people’s learning abilities. And we were doing pretty well at it! But ten months ago, we decided we could do better. We felt that we could build something truly special.",[25,29,30],{},"At the end of 2023, we decided to halt all our other operations and focus on deep research to answer one question: what’s the very best way to learn? 
We think we’ve come close to answering that question, and you can read about it in our findings below.",[32,33,35],"h2",{"id":34},"but-first-some-context","But first, some context",[25,37,38,39,46],{},"Throughout our research work at Kinnu we use a ",[40,41,45],"a",{"href":42,"rel":43},"https://hannatransfers.substack.com/p/how-to-measure-learning-gain",[44],"nofollow","learning metric called a K-Score"," to measure how much someone has learned from an intervention. Put simply, the higher the K-Score, the more effective the learning intervention. Based on a massive literature review, and the expertise of several of our team with PhDs in the science of learning, we set out 15 different experiments to run. Each of these was based on a different hypothesis about how we could improve learning.",[25,48,49],{},"The outcomes of these experiments would be measured in K-Score improvements, which we believe are a failsafe method for measuring actual learning.",[25,51,52,53,57],{},"The next step was to find learners who’d be our test subjects. We put out a notice to our existing users, with an ambitious target of recruiting 500 volunteers. Within a couple of weeks we actually had ",[54,55,56],"strong",{},"10,000,"," a number that truly blew us away. If you were one of them, we thank you from the bottom of our hearts.",[25,59,60],{},"So, now we had a list of experiments, a method for measuring their outcomes, and an army of volunteers.",[25,62,63,64,68],{},"There was one more thing we needed – a platform for testing on. Our proposed experiments deviated pretty heavily from our existing app, and it became clear that we’d actually need to build a ",[65,66,67],"em",{},"totally new app"," to run our experiments on. So that’s what we did – we built a minimal, rough-and-ready experimental learning app called Kinnu Labs. 
We gave access to our testers, and we were ready to start experimenting.",[32,70,72],{"id":71},"the-big-picture","The Big Picture",[25,74,75,76,79],{},"Let’s cut to the chase – what did we learn from our experiments? The short answer is… enough to rebuild our whole app from the ground up. Based on our findings, we’ve rebuilt Kinnu into ",[54,77,78],{},"Kinnu 2.0,"," which is a massive leap forward from the original app.",[25,81,82,83,86],{},"If we could put all of our findings into a single, big-picture summary, it would be this: ",[54,84,85],{},"people learn by building schemas",". That means that learning is about figuring out how to arrange disparate pieces of information to piece together your own model of a concept.",[25,88,89],{},"A lot of that comes down to the pace of learning, and delivering the right information at the right moment for the learner to continue to improve their understanding. It also means minimising cognitive load – a lot of our new features work by compressing content, such as questions or definitions, while retaining the same essential information. This allows learners to retain more information from a lighter load of content.",[25,91,92,93,96],{},"These were our very high-level findings, but the ",[65,94,95],{},"really"," interesting stuff (if you are as geeky as we are) comes in the details. Let’s take a look at some of the experiments we ran, and how they’ve shaped the app:",[98,99,101],"h3",{"id":100},"finding-number-1-content-is-still-king",[54,102,103],{},"Finding Number 1: Content Is (Still) King",[25,105,106],{},[107,108],"img",{"alt":109,"className":110,"src":113},"",[111,112],"my-12","w-full","/blog/posts/what-we-found-in-our-10000-person-learning-experiments/content-findings.png",[25,115,116],{},"Across every single one of our experiments, we found the most significant variable in determining K-Score was the quality of the written content. 
As a result, we’ve seriously shaken up content design in Kinnu 2.0.",[25,118,119,122,123],{},[54,120,121],{},"Orb Pathways, resulting in +69%"," ",[54,124,125],{},"K-Score",[25,127,128],{},"The brain learns best when it can master clearly defined concepts. 'Chunking' is how your brain groups pieces of information together to form discrete concepts. It's essential to be able to form those chunks in order to construct schemas.",[25,130,131],{},"As a result we’ve implemented a new unit of content called ‘orbs’. Each session you now do on Kinnu will be a single, coherent unit of content designed to teach a distinct concept. This has had a major impact on K-Score, while also making the pathways a lot more engaging and readable.",[25,133,134],{},[54,135,136],{},"Pathways that tell a narrative +68% K-Score",[25,138,139],{},"Another quirk of the human brain is that we love stories. We're way, way better at retaining information when it's linked into a narrative.",[25,141,142],{},"We’ve now shifted towards writing content in a more narrative, storified way, as opposed to drier, more factual styles. We found our test subjects were much better able to retain information as a result.",[25,144,145],{},[54,146,147],{},"Introductory questions and summary points +70% K-Score",[25,149,150],{},"The priming effect is what happens when you’re given a little preview of what you’ll learn, before you learn it. This gives us an outline, that we can then fill in with the details.",[25,152,153],{},"In Kinnu 2.0, before you read an orb, you’re given a preview of what you’ll learn in the form of some questions (which you don’t need to answer). 
At the end, you’ll read some bullet points that summarise what you’ve just learned.",[98,155,157],{"id":156},"finding-number-2-interaction-matters",[54,158,159],{},"Finding Number 2: Interaction Matters",[25,161,162],{},[107,163],{"alt":109,"className":164,"src":165},[111,112],"/blog/posts/what-we-found-in-our-10000-person-learning-experiments/interaction-findings.png",[25,167,168],{},"We played around with a number of novel ways of interacting with Kinnu. Some of these were surprisingly _un_successful, like an AI microtutor which seemed to have little to no effect on learning. Others were better, and have made it into Kinnu 2.0:",[25,170,171],{},[54,172,173],{},"Concept Lookup +66% K-Score",[25,175,176],{},"For people to build schemas, they need to piece together previously learned concepts into a bigger picture, or higher level of abstraction. To do this, it’s essential that they grasp those building-block concepts first.",[25,178,179],{},"Concept Lookup allows you to tap on a highlighted concept and be taken to a definition of it. You can also jump back to the point in the pathway you learned it from. This helps with staying on top of complex definitions and building larger models of understanding.",[25,181,182],{},[54,183,184],{},"'Why/How' Write-In Answer Questions +66% K-Score",[25,186,187],{},"It’s much more effective, from a learning angle, to write your own answer than to select from multiple options. We’re introducing ‘How/Why Questions’, a feature where users enter their own answers to open-ended questions.",[25,189,190],{},"Learners write their own answers to open-ended questions in-app. These are then graded by AI. Originally we had wanted this to be a peer-grading feature, with users rating each other’s answers. This isn't simple to build, so to gauge interest, we built an AI marking system instead. 
Users thought they were marking each other’s answers, but they were actually grading and being graded by GPT-4o.",[25,192,193],{},[107,194],{"alt":109,"className":195,"src":197},[111,112,196],"max-w-60","/blog/posts/what-we-found-in-our-10000-person-learning-experiments/why-how-mockup.png",[25,199,200,201,204],{},"It turns out that AI is actually pretty decent at grading learner’s answers. While we love the idea of implementing peer-grading one day, for the time being we'll be sticking with AI. ",[65,202,203],{},"Side note: If you were one of those users who thought they were doing peer grading - surprise! Sorry we couldn't tell you this at the time, but it was important for the experiment that this was kept secret."," ",[98,206,208],{"id":207},"finding-number-3-questions-can-be-so-much-more",[54,209,210],{},"Finding Number 3: Questions Can Be So Much More",[25,212,213],{},[107,214],{"alt":109,"className":215,"src":216},[111,112],"/blog/posts/what-we-found-in-our-10000-person-learning-experiments/question-findings.png",[25,218,219],{},"Many of our experiments were focused on gauging which kinds of questions have the greatest impact on learning. We experimented with LOADS of different question types, to varying degrees of success. Here are our best-performing ones, which you’ll see in Kinnu 2.0",[25,221,222],{},[54,223,224],{},"Collapsing Questions +59% K-Score",[25,226,227],{},"Cognitive load is what happens when you try to think about too many things at once. It's a huge problem when you're trying to learn.",[25,229,230],{},"Having a massive pile of questions to complete can contribute to this. Collapsing questions is a neat design that combines several related questions into one – reducing the number of reviews you need to complete, while still teaching you the same information",[25,232,233],{},[54,234,235],{},"‘Graph’ Questions +33% K-Score",[25,237,238],{},"Another huge boost to learning is the ability to situate ideas in context. 
It’s not enough to just know one thing – you need to understand how it relates to the other stuff in the same topic. Graph Questions are a whole family of new question types that allow you to answer questions on a load of different data types, including:",[240,241,242,246,249,252],"ul",{},[243,244,245],"li",{},"Timelines for dragging and dropping events",[243,247,248],{},"A world map you can drop pins on",[243,250,251],{},"Different forms of ordering (tallest to shortest, hottest to coldest, etc.)",[243,253,254],{},"Matching pairs",[25,256,257],{},[107,258],{"alt":109,"className":259,"src":260},[111,112,196],"/blog/posts/what-we-found-in-our-10000-person-learning-experiments/timeline-mockup.png",[98,262,264],{"id":263},"some-honorable-mentions-keep-your-eyes-peeled-in-the-future",[54,265,266],{},"Some honorable mentions (keep your eyes peeled in the future 👀)",[25,268,269],{},[107,270],{"alt":109,"className":271,"src":272},[111,112],"/blog/posts/what-we-found-in-our-10000-person-learning-experiments/honorable-mentions.png",[25,274,275],{},"Above, we listed the main takeaways we took from our experiments, and the features we’ve built to implement those findings in Kinnu. But there were plenty of other experiments too. Some of these were successful, others less so. Here are a few honourable mentions, some of which we’re more than a little likely to visit again in future.",[25,277,278],{},[54,279,280],{},"Case studies for higher-order thinking +82% K-Score",[25,282,283],{},"We let learners explore case studies teaching higher-order thinking skills. These were all about teaching abstract reasoning and critical thinking. The amazing learning impact means we will build in this space, but the content design is tricky. 
Watch this space!",[25,285,286],{},[54,287,288],{},"Interactive Non-Fiction +63% K-Score",[25,290,291],{},"Interactive non-fiction turned pathways into a kind of text-based adventure, where you could interact with different elements in a highly storified way. It was a lot of fun! However, some users found it a little confusing, and it was perhaps more a fun novelty than something we’d want to do long term",[25,293,294],{},[54,295,296],{},"Stickers +58% K-Score",[25,298,299,300,303],{},"Probably our most divisive experiment! This feature allows users to place stickers on their favourite parts of the app, as a personal tag and reminder for stuff they want to remember. Some ",[65,301,302],{},"loved"," this, others couldn’t bear it. Overall it was too divisive for us to build. But never say never…",[25,305,306],{},[107,307],{"alt":109,"className":308,"src":309},[111,112,196],"/blog/posts/what-we-found-in-our-10000-person-learning-experiments/stickers-mockup.png",[25,311,312,313,316,317,320],{},"So, there have been ",[54,314,315],{},"big things"," underway at Kinnu. As a result of these experiments we’ve built something that’s ",[54,318,319],{},"truly rooted in the research on how best to learn",".",[25,322,323,324,327],{},"And you can experience all of this for yourself ",[54,325,326],{},"very soon",". We will be rolling out Kinnu 2.0 in the coming weeks. If you’re reading this, there’s a good chance it’s already available. Have a look on the App Store and Play Store. Go on. You know you want to. We’ll see you on there 🐙",{"title":109,"searchDepth":329,"depth":329,"links":330},2,[331,332],{"id":34,"depth":329,"text":35},{"id":71,"depth":329,"text":72,"children":333},[334,336,337,338],{"id":100,"depth":335,"text":103},3,{"id":156,"depth":335,"text":159},{"id":207,"depth":335,"text":210},{"id":263,"depth":335,"text":266},"2024-08-05T00:00:00.000Z","At Kinnu we spent ten months researching learning interventions rooted in cognitive science. 
Here's how we did it, and how it's shaped the app.","md",{},true,"/blog/research/what-we-found-in-our-10000-person-learning-experiments",{"title":20,"description":340},{"loc":344},"blog/research/what-we-found-in-our-10000-person-learning-experiments","what-we-found-in-our-10000-person-learning-experiments.png","UMJf-3xUdut270cpG0BR_lzyUPL7nhCfws7b8iIiaxQ",{"id":351,"title":352,"body":353,"date":666,"description":667,"extension":341,"isKinnuverse":6,"meta":668,"navigation":343,"path":669,"seo":670,"sitemap":671,"stem":672,"thumbnail":673,"__hash__":674},"blog/blog/research/the-generative-ai-revolution.md","The Generative AI Revolution",{"type":22,"value":354,"toc":654},[355,358,361,370,373,376,380,383,386,395,398,402,405,408,422,442,446,449,452,459,463,466,469,495,499,502,505,509,512,517,523,533,537,540,543,546,551,555,558,561,564,567,570,575,579,582,603,607,610,615,620,625,630,635,640,643,646],[25,356,357],{},"A few weeks ago we launched Kinnu’s new pathway about the French Revolution. Hundreds of people have since started this learning pathway and of these 90% liked it (v. 93% median for Kinnu’s pathways and 99% for its best pathway). Not one person remarked that it was written with AI. ",[25,359,360],{},"Sure, a human was in the loop. An expert human wrote the plan. An average human removed the most obvious AI hallucinations. A human editor made the text flow. But AI wrote most of the text. AI generated pathway images. AI converted text into audio. If we were doing content generation in this way at scale, we could have done most of it directly with APIs and further streamlined the whole process.",[25,362,363,364,369],{},"Overall we saved 90% of time it would have otherwise typically taken us to make this learning content (",[40,365,368],{"href":366,"rel":367},"https://kinnu.xyz/blog/research/from-woman-to-machine/",[44],"here"," are details of the old and new process). And this is mostly with GPT-3. What will GPT-4 be able to do? 
What we know is that we are rebuilding our entire content creation operational model based on our French Revolution experiment. The Generative AI Revolution is here, and like the French Revolution it will shake up the old order. ",[25,371,372],{},"If you’re a content publisher, take note – if you think that proliferation of digital media changed your business model, brace yourself for what’s coming. Everyone else – rejoice! Content just got free (or in our case – 10x cheaper). ",[25,374,375],{},"We have learned tons from our experiments using AI for content creation which we wanted to share with you. Are you an academic drafting your annual curriculum? Student preparing an essay? Content marketer bored with the topic of your latest project? Below are our top 10 tips for generating content with AI.",[32,377,379],{"id":378},"give-ai-precise-instructions","Give AI precise instructions",[25,381,382],{},"Most generative text AI models like GPT-3 work by giving you the most probable next word based on a large language model (some others such as Natural Language Toolkit (NLTK) in Python or Washington Post’s Heliograf instead follow pre-defined grammatical and syntax rules and operate within the symbolic nature of language (the former typically outperform the latter though!).",[25,384,385],{},"You need to give the model a clear and crisp prompt, and even better a few examples. I typically try to imagine I am speaking to a child who has limited knowledge and a rather naive view about the world – you cannot assume they know what you’re talking about. There should be no ambiguities. Linguistic precision is required. For example, a query like “French Revolution” will give you a few generalist paragraphs. But if you add precision, for example “Tell me what was gallicanism?”, your results will be much more useful.",[25,387,388,389,394],{},"But don’t let this child metaphor fool you. 
You are speaking to a machine, and the machine can deliver feats which even the most effective intern can’t dream about. An example would be using it to extract latest quarterly performance of major listed companies – ticker, market capitalisation, EBITDA, etc. and add it to a spreadsheet. ",[40,390,393],{"href":391,"rel":392},"https://github.com/garethdmm/SpreadsheetMagic",[44],"Here"," is a fantastic example of GPT-3 doing in few seconds something which took me long hours in my first job.",[25,396,397],{},"Pro tip: Be specific. Want a paragraph with 150 words? “In a paragraph, explain X” Want a bullet list? “Write a bullet list of reasons why London is the best city ever”.",[32,399,401],{"id":400},"adjust-available-settings-when-you-create-images-with-ai","Adjust available settings when you create images with AI",[25,403,404],{},"We started off generating images in DALL-E with simple prompts. However, we quickly realised that you can then fine tune the images using a ridiculously intuitive interface and you end up with a result that’s better than most of us can ever design, all in well under 30 seconds per image generated.",[25,406,407],{},"Little did we know, there is a science to “prompt whispering”. What you need to know to do it well is (1) parameters you can use to change the output of the prompt (2) data sets that were used to train the data and (3) general art history orientation. Compare the generic output of “French revolutionaries storming the Bastille” to a more precise output of “A black and white photo of French revolutionaries storming the Bastille”.",[25,409,410,411,416,417],{},"How does it work? It has to do with how tools like DALL-E and Midjourney bring images to life from noise. You specify the recipes that are used to make an image. There is now even a ",[40,412,415],{"href":413,"rel":414},"https://promptbase.com/",[44],"marketplace"," to buy prompts. But can such recipes even be copyrighted? 
",[40,418,421],{"href":419,"rel":420},"https://www.london-law.co.uk/ive-developed-a-sensational-new-recipe-can-i-copyright-it/#:~:text=Copyright%20for%20written%20recipes,in%20its%20entirety%20without%20permission.",[44],"Probably not.",[25,423,424,429,430,435,436,441],{},[54,425,426],{},[65,427,428],{},"Pro tip:"," Learn more about Art History and datasets used to seed the DALL-E and Midjourney models. Take a look at all the different aspects of an image you can control with words in this ",[40,431,434],{"href":432,"rel":433},"https://openai.com/blog/dall-e/",[44],"DALL-E guidebook"," – you can specify objects, shape, colour, perspective, structure and style. You can go even further with Midjourney as per ",[40,437,440],{"href":438,"rel":439},"https://medium.com/mlearning-ai/an-advanced-guide-to-writing-prompts-for-midjourney-text-to-image-aa12a1e33b6",[44],"this guide",": in addition to basic stylistic features of DALL-E you can also control your images rendering (photographic texture), how stylized they are, chaos (how abstract they are) and weights to different kind of objects and stylistic features.",[32,443,445],{"id":444},"beware-text-generating-ai-often-lies-and-hallucinates","Beware: text generating AI often lies and hallucinates",[25,447,448],{},"When using AI-trained models to generate text using probabilistic methods, such as GPT-3, remember that the algorithm is trained on the web and it often makes glaring factual errors – GPT-3 was not designed to be a truth machine: it includes no higher level logical layer to check facts or make any logical conclusions. All it does is predict the next most likely word based on its training set. In our experiment it made a few hilarious / terrifying mistakes. No, Emmanuel Macron is not the king of France. No, Donald Trump has nothing to do with the French Revolution. No, we do not need a full list of religious orders abandoned by the French Revolution. Yes, all of this actually came up in actual GPT-3 output. 
",[25,450,451],{},"As the editor, you need to be vigilant: some of the output will be incorrect, some will be entirely irrelevant. Some errors were glaring and anyone with common sense and high school level education can spot them. Other mistakes are more nuanced and require expert knowledge, for example the following statement “In 1787, the King of France, Louis XVI, summoned the Assembly of Notables to advise him on how to solve the financial crisis facing the Kingdom.” sounds entirely plausible to a non-expert. However, historians would posit instead that “In 1789, Louis XVI sought to solve a political crisis by summoning the “Etats Généraux”, an ancient institution.”",[25,453,454,458],{},[54,455,456],{},[65,457,428],{}," Fact check everything GPT-3 and the like generate. They were made to create content that’s plausible, not correct. They are very convincing. Eg. when we first started playing with GPT-3 in the beginning of September when you typed “What’s the difference between polonium and radium?” the answer was “One is radioactive and another one is not”. Fortunately. This was extremely plausible, and fortunately had been quickly fixed, but it’s a fantastic example of how an AI writer will take your bias (you’re looking for a difference) and lack of knowledge and run with it, offering plausible and false conclusions.",[32,460,462],{"id":461},"combine-sentences-to-avoid-gpt-3-sounding-like-it-has-just-learnt-english-as-a-foreign-language","Combine sentences to avoid GPT-3 sounding like it has just learnt English as a foreign language",[25,464,465],{},"We noticed that a lot of the sentences generated by GPT-3 sound as if the algorithm has just learnt English as a foreign language. Remember when you first learnt English (or another language if English is your first language) and you could only speak in very simple sentences? “Mary went to the library. The library is full of books. 
She took a book home.” AI generated text can sound very much like this: “The Revolution led to the rise of Napoleon Bonaparte. Napoleon Bonaparte was a French military leader and political leader. He became Emperor of France in 1804.”",[25,467,468],{},"AI seems to have absolutely no problem writing about people, events and facts. It is particularly good at definitions. It does not excel at abstract concepts. I have not seen it write conditional sentences, or comfortably compose sentences with words like “although” and “however”. It seems stuck in middle school writing.",[25,470,471,475,476,479,122,484,122,487,122,492],{},[54,472,473],{},[65,474,428],{}," Use AI generated sentences as the baseline structure of your paragraphs. Connect these simple sentences with some flair, for example: ",[65,477,478],{},"“",[54,480,481],{},[65,482,483],{},"[Human needs to add colour]",[65,485,486],{},"Although the exact period of the French Revolution is hotly contested, most contemporary historians agree that",[54,488,489],{},[65,490,491],{},"[AI has no problem saying]",[65,493,494],{},"the Revolution began in 1789 with the storming of the Bastille, a symbol of the absolute power of the monarchy. The Revolution ended in 1799 with the establishment of the French Republic.”",[32,496,498],{"id":497},"dont-expect-ai-to-have-opinions-though-it-sometimes-does","Don’t expect AI to have opinions (though it sometimes does!)",[25,500,501],{},"It is easy to see why you would not want AI to have political opinions. What is somewhat surprising, however, is that it has both no opinion on less politically charged topics, and a limited sense of ethical judgment. Below is an illustration of my chat with the DaVinci algorithm on GPT-3 about more contemporary subjects.",[25,503,504],{},"Pro tip: Do not use GPT-3 for editorials. 
",[32,506,508],{"id":507},"dont-get-yourself-banned-by-requesting-inappropriate-images","Don’t get yourself banned by requesting inappropriate images",[25,510,511],{},"Despite what the media will have you believe, there are some constraints around what you can and cannot request from an image generating AI. For example, when GPT-3 algorithm output something along the lines that Emmanual Macron was the king of France, I thought it would be hilarious to generate a satirical image showing Emmanuel Macron as the king of France using DALL-E.",[25,513,514],{},[65,515,516],{},"“ It looks like this request may not follow our content policy. Further policy violations may lead to an automatic suspension of your account.”",[25,518,519,520],{},"You cannot generate images that may be perceived as offensive. Instead we had to be a bit creative and try to be more descriptive to get a similar result “",[65,521,522],{},"Young man in modern clothes sitting as the king of france in Louis XIV style throne room photorealistic.”",[25,524,525,527,528,532],{},[54,526,428],{}," DALL-E has a stricter content policy than a PG movie (you can read in detail ",[40,529,368],{"href":530,"rel":531},"https://labs.openai.com/policies/content-policy",[44],"). It does not allow you anything that could be perceived offensive, harassment, or inappropriate in any way. Not even if the intent is satirical (as far as we have been able to try out – generative AI does not have much of a sense of humour).",[32,534,536],{"id":535},"you-can-fine-tune-gpt-3-to-meet-your-particular-use-case","You can fine tune GPT-3 to meet your particular use case",[25,538,539],{},"There are amazing tools which allow you to personalise the output that GPT-3 gives you. You upload your unique training set and this changes the weights of the final loops that GPT-3 goes through. 
If you want to build output in a similar format, this can massively improve what you get out of the generic algorithm.",[25,541,542],{},"Because of the crazy large dataset models such as GPT-3  are trained on, they can do many tasks (such as sentiment analysis) without any training (called zero-shot learning). However, in some cases, like asking it to classify things based on non-traditional sentiments, it will benefit from a few examples (what is called few-shot learning).",[25,544,545],{},"When you are still not happy with the output from the few-shot examples, then you can fine tune the model with a training set (e.g. example sentences and their sentiments). This, depending on the task, can range from a few hundred to a few thousand examples, but then you will end up with your own customized model that you can use and reuse without giving any prompts",[25,547,548,550],{},[54,549,428],{}," You can use each AI tool’s customisation algorithm to add your custom data to help you get the output you want. This is true for both text and image generation. There are also amazing companies that are building infrastructure on top of these base models, and specialise in fine tuning.",[32,552,554],{"id":553},"use-apis-and-build-specialised-components-for-each-workstream","Use APIs and build specialised components for each workstream",[25,556,557],{},"AI tools like GPT-3 are generalist. They can do several different things, but it is significantly easier to ask for each thing separately. For example – you may want to build a full blown learning pathway about the French Revolution. While it might be nice to just get the full course all at once, it is actually easier to ask for it in parts.",[25,559,560],{},"You may ask for a bulleted plan of key points to know about the revolution. You may then write out key concepts (let’s say based on most frequently visited Wikipedia subpages, which you can easily pull via their API). 
You may then ask the tool to write specified paragraphs based on these concepts. You can then ask a different module to generate questions (one for short answer questions, one for multiple choice questions, one for true / false questions etc.).",[25,562,563],{},"The reason why it is beneficial to build out smaller components for different tasks is that despite encoding a massive amount of text in its model, when it comes to your own prompts and their responses, GPT-3 has a small working memory (or what is sometimes referred to as ‘context window’).",[25,565,566],{},"In fact, GPT-3 model has a request + response hard limit which is roughly equal to 1500 words. New requests will not remember previous ones and their answers, so the model cannot plan at a macro scale, it has to do one short topic at a time and it is your task to look at the bigger picture.",[25,568,569],{},"If you want to create a long narrative, then each new prompt should include key points from the previous response to sustain some continuity over separate calls.",[25,571,572,574],{},[54,573,428],{}," think of designing your generative AI project like a series of dominos. One function executes, triggering another which depends on its inputs, then another, then another.So for example you can start by building out a few paragraphs based on specific prompts. Then  you may generate headlines based on these paragraphs. The final step would include formulating review questions based on this content. The final step should always be to manually check the output for factual accuracy.",[32,576,578],{"id":577},"learn-the-key-vocabulary-associated-with-each-tool","Learn the key vocabulary associated with each tool",[25,580,581],{},"For example, it took me a while to discover that “temperature” in GPT-3 was not the level of enthusiasm the AI writer would display at the topic presented (as an overly enthusiastic person I naturally made this association). 
It’s actually the level of randomness – entropy – that you would like from your output. This means temperature of zero will always give you the same response for the same prompt, while the higher the temperature, the more likely that the responses will vary, but also more likely to drift from the original prompt.",[25,583,584,586,587,592,593,597,598],{},[54,585,428],{},": Want GPT-3 to sound a bit drunk? High temperature is your friend. I also did a bit of legwork and found these vocabulary guides for you for ",[40,588,591],{"href":589,"rel":590},"https://www.twilio.com/blog/ultimate-guide-openai-gpt-3-language-model",[44],"GPT-3",", ",[40,594,596],{"href":432,"rel":595},[44],"DALL-E"," and ",[40,599,602],{"href":600,"rel":601},"https://midjourney.gitbook.io/docs/resource-links/guide-to-prompting",[44],"Midjourney.",[32,604,606],{"id":605},"generative-ai-is-not-great-at-ambiguous-tasks","Generative AI is not great at ambiguous tasks",[25,608,609],{},"Generative AI, as it is now, is excellent at answering specific, factual questions, but it assumes it knows everything about your intent – it’s not there to ask you follow-up questions when appropriate (maybe one day it will?!). This means it’s not great at responding to extremely useful and frankly not that hard tasks, such as: “What should one know about the French Revolution?”. 
Instead of asking for more context, for example",[25,611,612,204],{},[65,613,614],{},"AI: “Why do you need this information?”",[25,616,617,204],{},[65,618,619],{},"Me: “Because I need it to generate a learning pathway about the French Revolution”",[25,621,622],{},[65,623,624],{},"AI: “Why would your audience want to learn about the French Revolution?”",[25,626,627],{},[65,628,629],{},"Me: “To once and for all remember all the key aspects of the French Revolution to apply this knowledge easily later”",[25,631,632],{},[65,633,634],{},"AI: “At what level of detail?”",[25,636,637],{},[65,638,639],{},"Me: “Ten sections, each with 10 subsections, ideally with titles clearly but crisply defining the content of each subsection”.",[25,641,642],{},"GPT-3 simply gave me a very generic answer which did not correspond to the above intent. Even using higher number of token and a more specific query “Generate a bullet list of headlines about things one should know about the French Revolution.”",[25,644,645],{},"Pro tip: We are further testing if using the API we can get generative AI to ask questions if unsure where to go next instead of delivering the wrong result. So far I’d encourage you to not use it for ambiguous tasks such as generating topic outlines.",[25,647,648,649,653],{},"If I missed anything – I would love to hear from you at ",[40,650,652],{"href":651},"mailto:hanna@kinnu.xyz","hanna@kinnu.xyz"," or below. 
This topic is dear to my heart and we are looking to use generative AI to create a set of the ultimate study materials for any topic.",{"title":109,"searchDepth":329,"depth":329,"links":655},[656,657,658,659,660,661,662,663,664,665],{"id":378,"depth":329,"text":379},{"id":400,"depth":329,"text":401},{"id":444,"depth":329,"text":445},{"id":461,"depth":329,"text":462},{"id":497,"depth":329,"text":498},{"id":507,"depth":329,"text":508},{"id":535,"depth":329,"text":536},{"id":553,"depth":329,"text":554},{"id":577,"depth":329,"text":578},{"id":605,"depth":329,"text":606},"2023-11-22T00:00:00.000Z","The Generative AI Revolution is here, and like the French Revolution it will shake up the old order.",{},"/blog/research/the-generative-ai-revolution",{"title":352,"description":667},{"loc":669},"blog/research/the-generative-ai-revolution","the-generative-ai-revolution.png","lp16dK8-MGySmHkl1MtC3DmhedJWX3IKz3KHkJEZzc4",{"id":676,"title":677,"body":678,"date":869,"description":870,"extension":341,"isKinnuverse":6,"meta":871,"navigation":343,"path":872,"seo":873,"sitemap":874,"stem":875,"thumbnail":876,"__hash__":877},"blog/blog/research/how-to-produce-consistently-styled-image-sets-in-midjourney.md","How to produce consistently styled image sets in Midjourney",{"type":22,"value":679,"toc":867},[680,689,692,695,698,701,704,707,710,713,716,719,722,725,728,731,734,737,747,754,761,768,771,774,777,785,792,799,806,809,812,815,821,827,832,838,841,844,847,852,857,862],[25,681,682,683,688],{},"Midjourney has been setting the internet ablaze in the past few weeks. Twitter, Reddit and LinkedIn are full of near-perfect image generations from the service. 
Pretty much anyone can produce great images with it – if you take a look at ",[40,684,687],{"href":685,"rel":686},"https://docs.midjourney.com/docs/parameter-list",[44],"the basic parameters",", and spend 15 minutes playing around with prompts, chances are you’ll produce something that you’d be proud to publish.",[25,690,691],{},"The hard part, however, is getting multiple images that work together. If you’re involved in producing websites, articles, apps or web content of any kind, you’ll know that consistency is key. You want your assets to work together to present a unified aesthetic across whatever piece of content you’re working on.",[25,693,694],{},"Without a bit of prompting know-how, you’ll probably end up with image sets that look random and unprofessional. For example, if you were trying to produce an image set for an educational course on Enlightenment philosophy, chances are you’d write prompts like this:",[25,696,697],{},"Jeremy Bentham",[25,699,700],{},"René Descartes",[25,702,703],{},"Galileo",[25,705,706],{},"An eighteenth-century telescope.",[25,708,709],{},"They’re pretty great images, all things considered, but they aren’t consistent. We’ve got one photograph, a graphic, a black-and-white illustration, and an engraving-style print.",[25,711,712],{},"What we’re aiming for (which is what we’ve produced with our method) is something like this:",[25,714,715],{},"These now look much more consistent and create a sense of continuity across the course.",[25,717,718],{},"So how do we do this? The answer, as you can probably guess, lies in creating consistency in our prompts.",[25,720,721],{},"We’ve devised a repeatable structure for writing prompts for large image sets that allows you to control the level of consistency, as well as the elements of your images that you want to stay consistent. 
This structure also makes your prompts modular – you can swap in and out the elements you want, making it easier to produce prompts at scale, especially if you are writing them in a spreadsheet.",[25,723,724],{},"The structure works by breaking down the prompt into five elements, separated by semi-colons. These are:",[25,726,727],{},"Scene depicted; Location; Lighting; Art style; Parameters",[25,729,730],{},"The reason we break these down into five parts is that we then have five possible levels of consistency to play with. ",[25,732,733],{},"If we want to have images that are kinda similar, but are still varied enough to avoid repetition, we can just keep two elements constant (in this instance, Art style and Parameters), and change the rest based on the contents of each image. In our system we’ll call this a two-factor similarity, because only two elements remain constant. ",[25,735,736],{},"The prompts for a two-factor image set would look like this (with the constant elements in bold):",[25,738,739,740,743,744],{},"Jeremy Bentham sitting on a bench; A light, airy terrace; Bright daylight; ",[54,741,742],{},"18th-century oil painting","; ",[54,745,746],{},"–v 5 –q 2 –s 130",[25,748,749,750,743,752],{},"René Descartes at a desk, looking towards us; The Bodleian Library, Oxford; Golden hour light coming through the windows; ",[54,751,742],{},[54,753,746],{},[25,755,756,757,743,759],{},"Galileo looks at a globe, deep in thought; An Italian villa; Midday sunlight; ",[54,758,742],{},[54,760,746],{},[25,762,763,764,743,766],{},"An eighteenth-century telescope next to a tree; A grassy field; Bright Springtime light; ",[54,765,742],{},[54,767,746],{},[25,769,770],{},"And if you want to see the outputs just look at the four images above! Those are the images we produced with two factors of similarity. 
We find two-factor similarity, specifically ones where the art styles and parameters are the shared elements, to be the ‘Goldilocks’ structure for our use case – retaining stylistic consistency without being repetitive or boring.",[25,772,773],{},"However, if you want to create greater consistency, or greater randomness, in your image sets, feel free to play around with one-factor, three-factor or even four-factor similarity in your prompt sets. Of course, a five-factor similarity would just be the same prompts repeated again and again!",[25,775,776],{},"The risk when you start adding higher-factor similarities is that they become repetitive. To illustrate this, here are some four-factor similarity prompts for the same scenes used above:",[25,778,779,780,743,783],{},"Jeremy Bentham sitting on a bench; ",[54,781,782],{},"The Bodleian Library, Oxford; Golden hour light coming through the windows; 18th-century oil painting",[54,784,746],{},[25,786,787,788,743,790],{},"René Descartes at a desk, looking towards us; ",[54,789,782],{},[54,791,746],{},[25,793,794,795,743,797],{},"Galileo looks at a globe, deep in thought; ",[54,796,782],{},[54,798,746],{},[25,800,801,802,743,804],{},"An eighteenth-century telescope; ",[54,803,782],{},[54,805,746],{},[25,807,808],{},"And the results:",[25,810,811],{},"These are pretty nice images, and for some use cases this level of similarity will be what you need, but you can probably see how these are a little on the repetitive side.",[25,813,814],{},"It can also be useful to play around with which elements you keep constant, and which are variable within your set. For example, you might try a two-factor similarity with just location and lighting the same. 
Let’s take a look at some prompts for that:",[25,816,779,817,820],{},[54,818,819],{},"Fitzroy Square, London; Bright, Spring light;"," Watercolour painting; –v 5 –q 2",[25,822,823,824,826],{},"René Descartes looking towards us; ",[54,825,819],{}," Realist painting; –v 5 –q 2 –s 200",[25,828,794,829,831],{},[54,830,819],{}," Children’s illustration; –v 5 –q 2 –s 130",[25,833,834,835,837],{},"An eighteenth-century telescope;  ",[54,836,819],{}," 35mm, photorealistic, Canon EOS 5D Mark IV DSLR, f/5.6 aperture, 1/125 second shutter speed, ISO 100; –v 5 –q 2 –s 90",[25,839,840],{},"The results:",[25,842,843],{},"They have a similar colouring and mood, while clearly having very different styles. This might be useful for people who want to make large image sets that deal with varied content, but also would like to keep a thread of aesthetic similarity running between them.  ",[25,845,846],{},"We recommend experimenting with our prompting structure to see how you can use it to create consistent Midjourney images. By taking a modular approach, breaking the prompts into five parts, the possibilities for different combinations are massive! ",[25,848,849],{},[65,850,851],{},"PS: If you’re new to Midjourney you might be a little confused by the ‘parameters’ element of our prompts – the stuff that looks like ‘–v 5 –q 2 –s 130’.",[25,853,854,204],{},[65,855,856],{},"A quick summary: ‘–v 5’ means you’ll be using Midjourney version 5, the most recent version. You could opt for ‘–v 4’ or ‘–v 3’ if you wish, but ‘–v 5’ tends to produce the highest quality results.",[25,858,859,204],{},[65,860,861],{},"‘–q 2’ is asking for Midjourney to dial up the ‘quality’ parameter to 2. Basically that means a more resource-intensive but higher-quality rendering. If you’d like to be more economical with your fast hours, use a lower quality parameter.",[25,863,864],{},[65,865,866],{},"Finally, ‘–s 130’ is tuning up the ‘style’ parameter, which is set to 100 by default. 
Higher style parameters will produce images that are focus more on aesthetics than on accuracy to the scene requested. So if you’re producing something abstract, tune it up. For something precise, like what we are creating here, you’ll want to keep the style parameter pretty low.",{"title":109,"searchDepth":329,"depth":329,"links":868},[],"2023-04-27T00:00:00.000Z","Follow our formula for creating beautiful image sets with a consistent style. Ideal for content creators, marketers, web designers and more. This article assumes you have a basic familiarity with how to use Midjourney.",{},"/blog/research/how-to-produce-consistently-styled-image-sets-in-midjourney",{"title":677,"description":870},{"loc":872},"blog/research/how-to-produce-consistently-styled-image-sets-in-midjourney","how-to-produce-consistently-styled-image-sets-in-midjourney.png","8QXrmVnEqdghm61C7al0UmUROGOBDNG6nuZp-5BE1Gs",{"id":879,"title":880,"body":881,"date":1071,"description":1072,"extension":341,"isKinnuverse":6,"meta":1073,"navigation":343,"path":1074,"seo":1075,"sitemap":1076,"stem":1077,"thumbnail":1078,"__hash__":1079},"blog/blog/research/from-woman-to-machine.md","From (wo)man to machine – building Kinnu’s learning pathways",{"type":22,"value":882,"toc":1060},[883,887,896,903,906,910,913,916,919,922,925,929,932,935,938,942,956,960,963,966,975,979,982,985,988,991,994,998,1001,1004,1007,1010,1013,1022,1026,1035,1038,1042,1045,1048,1051,1054,1057],[32,884,886],{"id":885},"content-is-king-but-is-it-about-to-be-dethroned","Content is king. But is it about to be dethroned?",[25,888,889,890,895],{},"Bill Gates declared that “Content is King” in his 1996 ",[40,891,894],{"href":892,"rel":893},"https://threestepsbusiness.com/content-is-king-bill-gates/",[44],"now famous essay",". But is that really true in the age of SEO, where more is more? 
Today, the wider your net of content, the more users you catch.",[25,897,898,899,902],{},"And what is the best definition of good quality of ",[65,900,901],{},"learning"," content? Who judges the quality: the learners or Google’s search algorithm? At Kinnu, we think about quality in terms of giving the learner the fastest route towards mastery: the content needs to be accurate (duh), engaging to make sure you don’t fall asleep while learning and as concise as possible to be respectful of the learners’ time and minimise their cognitive load. We are passionate about efficiently using time and resources, so these are important considerations which also impact what we consider “effective content creation”.",[25,904,905],{},"Do humans write better than AI tools like GPT-3? Are humans better at creating diagrams and images? Writing quizzes? Are there specific tasks where humans will continue to excel over AI in the short term? In the long term? These are just some of the many questions which our content and AI team (a big word for 3 frenzied individuals) has to answer in the next year.",[32,907,909],{"id":908},"starting-with-the-human","Starting with the human",[25,911,912],{},"We get a lot of questions about how Kinnu pathways are created. We work with a team of talented writers who are experts in writing but not specialised experts in a topic. ",[25,914,915],{},"Each pathway takes a week to plan, three weeks to write and a week to edit. Where the topic requires more specialised expertise, we submit it for a subject matter expert review, both in the planning stage and in the final review stage. This process ensures our materials are written balancing both accuracy and approachability.",[25,917,918],{},"I know a thing or two about content creation at a MOOC provider and I am quite proud with what we have been able to achieve in our first 3 months of making content at Kinnu. 
We have produced 35 learning pathways 5 times faster and 10 times cheaper than a typical online education provider would be able to achieve. And we had a team of 2 people instead of a typical team of 20-30. ",[25,920,921],{},"Where did we find an order of magnitude time saving, you ask? Well, the main time saving comes from no need for institutional approvals. A typical MOOC is stuck for 10 weeks in different parts of an approval process – even things as “no-brainer” as changing a course title to something more SEO friendly can take 2-3 weeks. The second time saver comes from not producing video content, because who has the time to watch talking heads when you can read 2x faster. Of course we also run a tight ship – operations are already running like clockwork, but we also reflect every single month on how to further scale and improve content production.",[25,923,924],{},"Our content creation process is mostly human today, relying on work of our external team of writers, with hundreds of in-house editorial hours spent editing drafts, writing questions, selecting images and generating audio . But we want to go faster and get better. We want to understand each part of the process intimately. Why? Because we want to know where it’s appropriate to invite robots to the party. That’s why we’re moving towards AI led learning content creation.",[32,926,928],{"id":927},"all-protein-no-carbs-and-definitely-no-academese","All protein, no carbs and definitely no academese",[25,930,931],{},"We have thought long and hard about how to create the best learning content for Kinnu and we feel quite strongly about having non-experts as pathway authors. During my PhD, I read enough academic articles and textbooks to realise that very few experts are also gifted teachers who are able to explain with clarity and simple language core concepts in the field. Of course there are exceptions to this (special mention to Richard Feynman and Roger Penrose). 
",[25,933,934],{},"This is because experts have a tendency to use a foreign language called academese.",[25,936,937],{},"We wanted to create learning pathways which are as efficient as possible, providing our learners the shortest pathway towards mastery. No fluff. As few words as possible. Just enough human interest stories to keep you engaged. Enough context and valuable knowledge to build deep expertise, bite by bite. Kinnu is here to save you time and introduce must-know concepts in a topic. The rest of the internet is well suited to help you deepen your knowledge on any topic.",[32,939,941],{"id":940},"what-have-we-learnt-so-far","What have we learnt so far?",[25,943,944,945,597,950,955],{},"The verdict so far – our learning content is not epic like CS50 or Steven Pinker’s books, but reads better than most online courses I have done. It’s a lot of reading, it’s hard to find adequate copyright free images, but thank you wholeheartedly to ",[40,946,949],{"href":947,"rel":948},"https://unsplash.com/",[44],"Unsplash",[40,951,954],{"href":952,"rel":953},"https://commons.wikimedia.org/wiki/Main_Page",[44],"Wikicommons"," for the ones we have. Is it perfect? No. Despite a talented (if young!) editorial team and a specialty hired proofreading service, the pathways still are ridden with typos (about 5-10 per pathway). The writing is good, but definitely not Pulitzer material. Less dry than Wikipedia can be but more engaging than a typical book written by a deeply passionate author with a strong point of view. We are going fast, but we feel the need to both go slower, to get high polish required for our discerning learners, and faster to get more content onto the app as quickly as possible. 
And, of course, all content meets Kinnu’s own quality requirement – finding the fastest and most engaging way to learn.",[32,957,959],{"id":958},"a-voice-from-a-cloud","A Voice From A Cloud",[25,961,962],{},"From our work so far, we have learned that AI-generated voices have gotten much, much better over the last couple of years. Audio-generated transcripts can easily be updated, keeping content fresh and accurate in a fast-changing world.",[25,964,965],{},"Additionally, AI-generated transcripts can be arranged via an integrated API, radically decreasing the operational complexity of content creation. And *drumroll* in our humble opinion AI sounds better than humans do when you slow down or speed up your audio (which most of our audio fans tend to do, based on research).",[25,967,968,969,974],{},"Down the line we will also experiment with AI-generated videos. ",[40,970,973],{"href":971,"rel":972},"https://www.synthesia.io/",[44],"Synthesia"," is an AI video generating tool that is truly impressive. It is super convenient to use, makes it easy to personalise and update the content as well as translate anything into several languages: a powerful technology which is getting both better and cheaper every day.",[32,976,978],{"id":977},"where-we-want-to-get-to","Where we want to get to",[25,980,981],{},"We started Kinnu knowing that seeding the initial learning content was just the beginning. To reach scale and grow the ability to build new content at scale and pace we would need to start using AI, user generated content (UGC) or a combination of both. ",[25,983,984],{},"The one question that kept coming up in product design meetings was – how many learning pathways are we looking to build, given no constraints around time or resources.",[25,986,987],{},"Our content lead thought the maximum number of pathways should be below 1,000 (and who can blame him!). My initial view, having reviewed Wikipedia’s top topics, was closer to 2,000 – 5,000. 
Then I asked Chris, who answered with the enigmatic “I think it should be either 0 or infinity”. (“Ever heard of shades of grey, Chris?”). ",[25,989,990],{},"But ultimately, Chris is right – part of the reason people love getting help from human tutors is that they can tailor the response to exactly the learner’s demand, with full awareness of their context. ",[25,992,993],{},"State-of-the-art generative AI tools such as GPT-3 (for language) and DALL-E (for images) offer conversational interfaces that allow people to stretch the bounds of their creativity to fully test the strengths and weaknesses of algorithms. We decided to have some fun.",[32,995,997],{"id":996},"the-french-revolution-experiment","The French Revolution experiment",[25,999,1000],{},"Everyone keeps talking about how great natural language processing (NLP) is, and we saw the incredible progress in text-to-speech processing, so we asked ourselves – “Can AI write one of our learning pathways?”. We picked a topic with lots of documentation across the internet, and also with lots of factual information, which also incidentally we are interested in. Here is how we did it. ",[25,1002,1003],{},"First, a benevolent expert (read: my husband) spent 2 hours writing the detailed plan of the learning pathway. It is divided into 6 tiles or about 40 sections, each of which contains about 2 facts per section. The human writes about 3,000 words all in, including headings.",[25,1005,1006],{},"I copied and pasted each of the 80 detailed fact bullets into the OpenAI sandbox; it took me about 30 minutes. Then, I picked one of the newest algorithms, DaVinci, and let the AI write another 50-100 words per section based on each prompt. The writing is actually pretty decent (if this was a university essay, I’d give it a C+).",[25,1008,1009],{},"But the AI is trained on the web and sometimes it makes hilarious (terrifying?) mistakes. No, Emmanuel Macron is not the king of France (as some satirists would have you believe). 
No, Donald Trump has nothing to do with the French Revolution (though he might have put some of its principles at stake). No, we do not need a full list of religious orders abandoned by the French Revolution.",[25,1011,1012],{},"What other mistakes and inconsistencies were there? Just to be on the safe side, the benevolent expert reviewed the AI-written pathway, made edits and added a few interesting facts which were missing. All in all, the expert was quite impressed with the quality of the work provided by AI, but did not think that without his review and rewrites the content was ready to put into our app. ",[25,1014,1015,1016,1021],{},"It took him another 3 hours to rewrite the rest of the learning pathway content. All in all, it took him 5.5 hours to write a full learning pathway. Now, we need to add one hour to add images, another to generate audio, 3 hours to write questions. We went from nothing to full learning materials in just under 10 hours! Not bad for a first try – the full ",[40,1017,1020],{"href":1018,"rel":1019},"https://docs.google.com/document/d/1TYbz0msp2jMvHmyCTgGiGL6MxIb7zt2oyoFjQcdvXcQ/edit#heading=h.bqlw89qtbyoz",[44],"pathway"," is now available on Kinnu. Following our experiences, we wrote up our best practices on working with generative AI.",[32,1023,1025],{"id":1024},"the-tree-of-knowledge","The Tree of Knowledge",[25,1027,1028,1029,1034],{},"But is the expert really needed in the initial stage of figuring out what are the necessary important facts to know about the French Revolution? 
What if we used the Wikipedia knowledge graph (sidenote: you should ",[40,1030,1033],{"href":1031,"rel":1032},"https://en.wikipedia.org/wiki/Wikipedia:Donate",[44],"donate to Wikipedia"," now if you haven’t already, so many projects stand on the shoulders of this giant!).",[25,1036,1037],{},"We had previously run some queries to identify top topics in priority markets on Wikipedia and the list reflects surprisingly well on humanity (in addition to sex and gossip people are more interested in philosophy, psychology and religion than I would have thought). Subheadings of the Wikipedia page on the French Revolution align surprisingly well with the structure provided by our expert, providing an avenue for future exploration.",[32,1039,1041],{"id":1040},"the-destin-ai-tion","The destin-AI-tion",[25,1043,1044],{},"What else can we automate? Well, we’re going to assume nothing is sacred and will push the boundaries as far as we can, starting with human + computer models and moving towards full automation (fun fact: I learnt this approach when automating myself out of all of my finance jobs at Google). ",[25,1046,1047],{},"We’ve mentioned text generation as one potential way to help content creation but there is much more we can do, which includes NLP-assisted question-answer creation, key concept detection, text summarization and more. The text generation example is given with the help of the OpenAI-based GPT-3. But, we also want to develop our internal know-how which will further enhance and develop our tasks. ",[25,1049,1050],{},"In our work, we want to draw from the breadth of NLP research, with the focus on the most recent developments which in particular can accelerate pathway creation. We use cutting-edge algorithms which elevate deep learning to be used for language-related automation. Those include transformers such as BERT (of which GPT-3 is also an example). ",[25,1052,1053],{},"Questions can be written with an NLP algorithm. 
Questions can be rephrased by another NLP algorithm. Images can be built with DALL-E or Midjourney.",[25,1055,1056],{},"And the most exciting initiative of them all – imagine a world where you can type “Data science in 5h over one week, I know a little bit, mostly about statistics” – what you get is a personalised content pathway recommendation created from atomic pieces of content, just for you – that’s where we are going.",[25,1058,1059],{},"We are also very keen to see how we can engage our community of learners in helping us build the best learning materials on the planet. There are fantastic examples of how communities co-create knowledge (a big passion of mine). How can we aim this creativity and passion towards this project without replicating the scale and breadth of Wikipedia? But more on that in a future post.",{"title":109,"searchDepth":329,"depth":329,"links":1061},[1062,1063,1064,1065,1066,1067,1068,1069,1070],{"id":885,"depth":329,"text":886},{"id":908,"depth":329,"text":909},{"id":927,"depth":329,"text":928},{"id":940,"depth":329,"text":941},{"id":958,"depth":329,"text":959},{"id":977,"depth":329,"text":978},{"id":996,"depth":329,"text":997},{"id":1024,"depth":329,"text":1025},{"id":1040,"depth":329,"text":1041},"2022-11-16T00:00:00.000Z","Can an AI create any learning pathway you can think of in under 5 seconds? We are about to find out. A deepdive into how Kinnu produces its learning materials.",{},"/blog/research/from-woman-to-machine",{"title":880,"description":1072},{"loc":1074},"blog/research/from-woman-to-machine","from-woman-to-machine.png","_r3YiQdY0wp0xs0NdUXO8wwwuFwYVxex-zOPX7CCW54",{"left":4,"top":4,"width":5,"height":5,"rotate":4,"vFlip":6,"hFlip":6,"body":1081},"\u003Cpath fill=\"none\" stroke=\"currentColor\" stroke-linecap=\"round\" stroke-linejoin=\"round\" stroke-width=\"2\" d=\"m9 18l6-6l-6-6\"/>",1778179164972]