<?xml version='1.0' encoding='UTF-8'?><?xml-stylesheet href="http://www.blogger.com/styles/atom.css" type="text/css"?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:blogger='http://schemas.google.com/blogger/2008' xmlns:georss='http://www.georss.org/georss' xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr='http://purl.org/syndication/thread/1.0'><id>tag:blogger.com,1999:blog-26951738</id><updated>2026-04-03T13:10:52.340-07:00</updated><category term="moral psychology"/><category term="sociology of philosophy"/><category term="science fiction"/><category term="announcements"/><category term="stream of experience"/><category term="metaphilosophy"/><category term="ethics professors"/><category term="metaphysics"/><category term="speculative fiction"/><category term="consciousness"/><category term="ethics"/><category term="belief"/><category term="introspection"/><category term="self-knowledge"/><category term="sense experience"/><category term="chinese philosophy"/><category term="humor"/><category term="advice"/><category term="AI/robot/Martian rights"/><category term="psychology of philosophy"/><category term="skepticism"/><category term="culture"/><category term="epistemology"/><category term="cosmology"/><category term="professional issues in philosophy"/><category term="transhumanism"/><category term="artificial intelligence"/><category term="experimental philosophy"/><category term="psychological methods"/><category term="AI"/><category term="applying to grad school"/><category term="personal identity"/><category term="USA consciousness"/><category term="aesthetics"/><category term="dreams"/><category term="Joshua Rust"/><category term="visual experience"/><category term="moral development"/><category term="jerks"/><category term="crazyism"/><category term="eyes closed"/><category term="technology"/><category term="demographics"/><category term="inner speech"/><category term="Keith 
Frankish"/><category term="philosophy of mind"/><category term="delusions"/><category term="imagery"/><category term="Amy Kind"/><category term="Dan Haybron"/><category term="G. Randolph Mayes"/><category term="Hagop Sarkissian"/><category term="Lisa Bortolotti"/><category term="Manuel Vargas"/><category term="Tamler Sommers"/><category term="Teed Rockwell"/><category term="attitudes"/><category term="justin tiwald"/><category term="Brad Cokelet"/><category term="Henry Shevlin"/><category term="Jonathan Ichikawa"/><category term="Nick Riggle"/><category term="Regina Rini"/><category term="history of psychology"/><category term="neuroscience"/><category term="Bryan Van Norden"/><category term="Carrie Figdor"/><category term="developmental psychology"/><category term="discussion arcs"/><category term="annoucements"/><category term="axiology"/><category term="c thi nguyen"/><category term="explanation"/><category term="futurism"/><category term="games"/><category term="illusion"/><category term="politics"/><category term="religion"/><category term="alien minds"/><category term="daoism"/><category term="decision theory"/><category term="happiness"/><category term="knowledge"/><category term="longtermism"/><category term="simulation"/><category term="weirdness"/><category term="Andrew Y Lee"/><category term="David Livingstone Smith"/><category term="Jeremy Pober"/><category term="Liam Kofi Bright"/><category term="Linus Huang"/><category term="Neil Van Leeuwen"/><category term="Susan Schneider"/><category term="announcement"/><category term="death"/><category term="disability"/><category term="dispositions"/><category term="imagination"/><category term="meaning of life"/><category term="public policy"/><category term="representationalism"/><category term="teaching philosophy"/><category term="value"/><category term="wonder"/><title type='text'>The Splintered Mind</title><subtitle type='html'>reflections in philosophy of psychology, broadly construed</subtitle><link 
rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/posts/default'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default?redirect=false'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/'/><link rel='hub' href='http://pubsubhubbub.appspot.com/'/><link rel='next' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default?start-index=26&amp;max-results=25&amp;redirect=false'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/11541402189204286449</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='29' height='32' src='//blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi_YDpD8BoUU5D3NE3hpDk9vkMYq3tyr7YedWdn_xrdXaJeLjNYfnTnYA9y41493RdjmhIIink0bWW-SAf8ZNE0wpiI199Zzw0Hsjft1QCQWbZjRkmPimmwaeeVGAHUXs0/s220/IMG_9419-bw-cropped.jpg'/></author><generator version='7.00' uri='http://www.blogger.com'>Blogger</generator><openSearch:totalResults>1596</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><entry><id>tag:blogger.com,1999:blog-26951738.post-7062226196420151184</id><published>2026-04-02T09:21:00.000-07:00</published><updated>2026-04-02T11:27:08.873-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="advice"/><category scheme="http://www.blogger.com/atom/ns#" term="applying to grad school"/><title type='text'>So You&#39;re on the &quot;Waiting List&quot; for a Philosophy PhD Program</title><content type='html'>It&#39;s confusing.  You applied to a PhD program in philosophy in the U.S.  You haven&#39;t been admitted.  You haven&#39;t been rejected.  You&#39;re in limbo.  Let me explain and offer some advice.&lt;p&gt;
  
&lt;i&gt;Yield-Based vs. Seats-Based Admissions&lt;p&gt;&lt;/i&gt;
  
&lt;i&gt;Yield-based.&lt;/i&gt;  Some departments -- the ones with wise high-level administrators -- aim for a target entering class size and admit students expeditiously to fill it.  Suppose a department wants six entering students and expects a 40% yield (meaning 40% of admitted students enroll).  The sensible course is to admit fifteen students in February or early March, recruit all of them, and expect about six to say yes.&lt;p&gt;
  
&lt;i&gt;Seats-based.&lt;/i&gt;  Other departments -- the ones with foolish high-level administrators -- receive a strict allotment of seats, for example six.  They then admit that allotment swiftly, adding more only as admitted students decline.  Administrators can rest assured that no more than six students will need funding, which is slightly more convenient for those administrators.  But it wreaks havoc on the admissions process, since:
&lt;ul&gt;&lt;li&gt;Departments become reluctant to admit students they think will go elsewhere -- for example, strong candidates likely to have been admitted to higher-ranked programs.&lt;/li&gt;
&lt;li&gt;Departments pressure early-admitted students to decline quickly, to free up seats.&lt;/li&gt;
&lt;li&gt;It creates a chaotic rush of last-minute admittances as April 15 approaches (the standard deadline for decisions).  Many students understandably want the full time to decide, especially if they are hoping for a last-minute decision from a program they prefer.&lt;/li&gt;&lt;/ul&gt;&lt;p&gt;
  
These costs plainly outweigh the minor budgetary convenience of seats-based admissions, especially since (1.) the risk of overenrollment can be spread across several departments, and (2.) funding uncertainty already exists beyond the first year, as students stochastically drop out or find independent funding.  Unfortunately, unwise administrators swarm the Earth.  My own department uses seats-based admission.&lt;p&gt;
  
In practice, the division isn&#39;t entirely sharp.  Some yield-based departments admit conservatively early on -- maybe ten students rather than fifteen -- and then admit more on a rolling basis as the picture clarifies.  And some seats-based departments informally reach out to strong candidates to gauge interest. (If a candidate says, &quot;Oh I&#39;ve just been admitted to Princeton and Yale, so it&#39;s very unlikely I&#39;d come to [School X]&quot;, the committee thanks them for their candor and moves on.)&lt;p&gt;
  
&lt;i&gt;What a Waiting List Is&lt;p&gt;&lt;/i&gt;
  
Some departments maintain an official, ranked waiting list.  More commonly, it&#39;s a nebulous group: about six to fifteen near-admits, who are on the committee&#39;s mind but not strictly ranked or formally designated.  Either way, the list&#39;s composition and ranking can vary depending on who has already accepted and declined.  For example, if the department would like to have at least one student in history of philosophy and their top-choice history student has declined, the next offer might go to a strong history of philosophy student who didn&#39;t quite make the initial cut.&lt;p&gt;
  
If you have been admitted, the admitting department will of course tell you.  If you have been rejected, they &lt;i&gt;might&lt;/i&gt; tell you, or you might hear nothing (or nothing until after April 15); so if you don&#39;t hear anything by April 1, that doesn&#39;t mean you&#39;re on the waiting list.  Students are sometimes contacted to be told they&#39;re on the waiting list, but often (usually?) not.&lt;p&gt;
  
As April 15 approaches, departments that look like they won&#39;t hit their enrollment target will start contacting students on their official or unofficial waiting lists, with increasing urgency as 11:59 pm April 15 nears.  This is especially true for departments with seats-based admissions and low yields.  (Rarely, departments will reach out April 16 or after, which is not quite kosher but understandable.)&lt;p&gt;
  
&lt;i&gt;How to Figure Out Whether You Are on the Waiting List&lt;p&gt;&lt;/i&gt;
  
Admissions chairs will likely be annoyed with me for giving this advice, since it will increase their volume of email, but I want what&#39;s best for you, not for them.&lt;p&gt;
  
If you haven&#39;t heard by April 1, feel free to email the admissions committee to ask if you are on the waiting list.  Even departments who have fallen behind schedule should have mostly sorted out their top offers and near-admits by then.  You deserve to know by April 1 whether you&#39;re a near-admit with a chance of a late offer or whether you&#39;re out of consideration.  It&#39;s not rude for you to contact them with a brief query. The one exception would be if the department has made clear in the admissions process or on their website either that they have no waiting list or that if you haven&#39;t heard by X date (before April 1) you will definitely not be admitted.&lt;p&gt;
  
There&#39;s one other condition under which it makes sense to query, even before April 1: if you are about to accept an offer elsewhere, would prefer the department in question, and have a reasonable expectation of a decent chance of admission.&lt;p&gt;
  
How to interpret the reply: You might not hear a definitive &quot;no&quot;, but if the committee says something like &quot;it&#39;s unlikely you&#39;ll be admitted&quot; or &quot;you&#39;re not currently under consideration&quot;, you should interpret that as a no. If there&#39;s a realistic chance of a last-minute admission, the response will be more encouraging or specific, without creating unrealistic expectations -- for example, &quot;probably not, but there is a chance, so if you&#39;re still interested, stay in touch&quot;.&lt;p&gt;
  
&lt;i&gt;How to Increase Your Chance of Admission, If You&#39;re on the Waiting List&lt;p&gt;&lt;/i&gt;
  
When a department turns to its waiting list, it&#39;s hoping that students will quickly say yes.  This is especially true in the second week of April.  Therefore, convey enthusiasm!  Simply asking whether you&#39;re on the waiting list already displays interest, so that&#39;s a good start.  If you&#39;re permitted to attend a campus event, go if you can.  Recruitment events are usually only for admitted students, but not always, especially for candidates near the top of a seats-based department&#39;s waiting list.  If a committee is on the fence among four waitlisted students and one has shown more enthusiasm than the others, they&#39;re likely to turn to the enthusiastic student.&lt;p&gt;
  
The admissions committee might try to gauge your interest.  It&#39;s contrary to good policy for them to bluntly ask whether you&#39;d accept an offer, and you shouldn&#39;t be expected to pre-commit.  But if you&#39;re genuinely eager about the program, say so.  If you&#39;ve been admitted elsewhere but think you&#39;d probably prefer the department in question, let them know.&lt;p&gt;
  
&lt;i&gt;Being a Good Citizen&lt;p&gt;&lt;/i&gt;
  
Whether you&#39;re on the waiting list or have been officially admitted, I recommend frankness and honesty.  The process is chaotic and full of perverse incentives (especially in seats-based departments), and you can help it run more smoothly by:
  &lt;ul&gt;&lt;li&gt;notifying departments as soon as you know you won&#39;t be accepting their offer of admission (even if you haven&#39;t settled on a final choice);&lt;/li&gt;
    &lt;li&gt;honestly communicating your likelihood of accepting, so that committees can estimate their yield;&lt;/li&gt;
    &lt;li&gt;keeping your communications brief and polite, and not writing repeatedly;&lt;/li&gt;
    &lt;li&gt;not contacting other professors in the department hoping for an inside track to admission.&lt;/li&gt;&lt;/ul&gt;
      
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjdv0CmP0StFP4TU__njKz-8KLmb0U5coVzxQN-XnCsdiBOfKR3w1mFBLtpFS6A9dqwySBtbTEee4O2ZiwoA7tBPZhZY7VcGsnGfXhqRKYBXdlJOEwWyl1OYJtpvrlE5hh7bSbn0CN6CbyFeL9v9kBleMmJW9K-i_snv3YGD56Z6y44uaSknP5ag/s380/WaitingList.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;380&quot; data-original-width=&quot;276&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjdv0CmP0StFP4TU__njKz-8KLmb0U5coVzxQN-XnCsdiBOfKR3w1mFBLtpFS6A9dqwySBtbTEee4O2ZiwoA7tBPZhZY7VcGsnGfXhqRKYBXdlJOEwWyl1OYJtpvrlE5hh7bSbn0CN6CbyFeL9v9kBleMmJW9K-i_snv3YGD56Z6y44uaSknP5ag/s320/WaitingList.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[A hypothetical waiting list of names drawn randomly from lists of my former lower-division students]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/7062226196420151184/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/7062226196420151184' title='1 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7062226196420151184'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7062226196420151184'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/04/so-youre-on-waiting-list-for-philosophy.html' title='So You&#39;re on the &quot;Waiting List&quot; for a Philosophy PhD Program'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjdv0CmP0StFP4TU__njKz-8KLmb0U5coVzxQN-XnCsdiBOfKR3w1mFBLtpFS6A9dqwySBtbTEee4O2ZiwoA7tBPZhZY7VcGsnGfXhqRKYBXdlJOEwWyl1OYJtpvrlE5hh7bSbn0CN6CbyFeL9v9kBleMmJW9K-i_snv3YGD56Z6y44uaSknP5ag/s72-c/WaitingList.jpg" height="72" width="72"/><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-1902524606472828507</id><published>2026-03-24T09:23:00.000-07:00</published><updated>2026-03-24T09:53:01.209-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><title type='text'>A Model of Disunified Human Experience</title><content type='html'>It&#39;s a philosophical truism that human conscious experience is unified: If you&#39;re at a bar, hearing music, 
tasting beer, and feeling pleasantly relaxed, those experiences don&#39;t occur merely side by side.  They are &lt;a href=&quot;https://consc.net/papers/unity.pdf&quot;&gt;joined together into an integrated whole&lt;/a&gt;, an experience of music-with-beer-with-relaxation.&lt;p&gt;
  
I&#39;m not sure this truism is correct.  As I suggested in &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/09/the-disunity-of-consciousness-in.html&quot;&gt;an earlier post&lt;/a&gt;, experiential unity might be an artifact of introspection and memory: When we &lt;i&gt;introspectively notice &lt;/i&gt;that we&#39;re experiencing music, beer, and relaxation all at once, we thereby bind those experiences into a whole.  Likewise, when we &lt;i&gt;remember&lt;/i&gt; such moments, we reconstruct them as unified.  But it doesn&#39;t follow that those experiences, even if they all occurred simultaneously in you, were unified rather than transpiring separately.  Experiences of music, beer, and relaxation might have all been going on inside of you, no more joined together than those experiences are joined with the similar experiences of your friend across the table.  Simple co-occurrence doesn&#39;t entail experiential unity.&lt;p&gt;
  
If this possibility is coherent, then introspection and memory can&#39;t establish that experience is always unified.  At most, they show that &lt;i&gt;introspected&lt;/i&gt; and &lt;i&gt;remembered&lt;/i&gt; experiences present themselves as unified.  But that leaves open the status of unintrospected, unremembered experiences.  Unity becomes difficult to verify by standard phenomenological methods.&lt;p&gt;
  
But the issue needn&#39;t be intractable.  We just need to approach it less directly, for example by exploring what follows from a well-established theory of consciousness.  If some well-motivated Theory X implies unity (or disunity), that would provide reason to accept its conclusion.&lt;p&gt;
  
I&#39;ll now present a candidate Theory X.  I&#39;m not suggesting that this is the right theory of consciousness!  For one thing, it&#39;s simplistic.  I&#39;m sure the mind is much more complicated than I&#39;m about to say.  I offer this theory only as a proof of concept.  There &lt;i&gt;could&lt;/i&gt; be a theory of consciousness with massive disunity as an implication.&lt;p&gt;
  
This theory combines &lt;a href=&quot;https://www.sciencedirect.com/science/article/pii/S0896627320300520&quot;&gt;Global Workspace Theory&lt;/a&gt; and &lt;a href=&quot;https://pubmed.ncbi.nlm.nih.gov/16997611/&quot;&gt;Recurrent Processing Theory&lt;/a&gt;.  According to this hybrid, Global Workspace Theory governs attended experiences -- those targeted by introspection or reconstructed in memory -- while Recurrent Processing Theory governs unattended experiences.&lt;p&gt;
  
The mind, on this picture, is composed of many separate &quot;modules&quot; that work mostly independently, connected by a workspace where a small amount of attended information is shared globally.  There&#39;s a visual module, an auditory module, modules for motor activity, episodic memory, and so on.  When we attend to something -- say, the taste of beer -- the information from the relevant module is broadcast into the Global Workspace, where it can be accessed by and influence processes in all the other modules.  When unattended, the information stays local.&lt;p&gt;
  
Here&#39;s one illustration of this type of architecture:
  &lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEglnldCPt-cex1skIbvDrisiy8QcGb5BYfzK9my0q6RUsa_O23eqnlasY6bqlY2jKM2txsMUL62aN1nrAgpUoO6OD2gpnnB3n-ysSbaiqKYXnLg392Q9iBX6KLIIQkGa1z2ZPJkaj5sMNwr4kadKlELh5qzfyR0ZLvVsfVR5GJl6aUcb0bcVqAT5w/s734/Workspace.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;649&quot; data-original-width=&quot;734&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEglnldCPt-cex1skIbvDrisiy8QcGb5BYfzK9my0q6RUsa_O23eqnlasY6bqlY2jKM2txsMUL62aN1nrAgpUoO6OD2gpnnB3n-ysSbaiqKYXnLg392Q9iBX6KLIIQkGa1z2ZPJkaj5sMNwr4kadKlELh5qzfyR0ZLvVsfVR5GJl6aUcb0bcVqAT5w/s320/Workspace.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[the Global Workspace; &lt;a href=&quot;https://www.sciencedirect.com/science/article/pii/S0896627320300520&quot;&gt;source&lt;/a&gt;]&lt;p&gt;
  
Orthodox Global Workspace Theory holds that &lt;i&gt;only&lt;/i&gt; what is broadcast into the workspace is conscious.  Theory X alters that assumption.  Many people hold that &lt;a href=&quot;https://www.sciencedirect.com/science/article/pii/S1364661311002221&quot;&gt;conscious experience vastly outruns attention&lt;/a&gt;.  Many people hold, that is, that you can experience the hum of traffic in the background when you&#39;re not attending to it, and the &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/ExpWOAttn.htm&quot;&gt;feeling of your feet in your shoes&lt;/a&gt;, and the leftover taste of coffee in your mouth, etc. -- all in a peripheral way, simultaneously, when your focus is elsewhere.  Theory X, drawing on Recurrent Processing Theory, holds that such processes are conscious whenever there&#39;s enough cognitive activity of the right sort (recurrent processing, for example) in the modules, even without global broadcast.&lt;p&gt;
  
The picture, then, is this: We have multiple sensory (and other) experiences all running simultaneously, each with enough cognitive processing to be conscious, but few of which are selected for global availability through attention.&lt;p&gt;
  
Is there reason to think these modular processes are unified with one another?  I see no reason to think so, if they&#39;re genuinely &lt;i&gt;modular&lt;/i&gt; -- that is, if their processing stays local, exerting little influence elsewhere.  The taste-of-beer processing stays in the tasting module.  The sound-of-music processing stays in the auditory module.  No link up.  No straightforward causal, functional, or physiological basis for a unified experience of beer-with-music rather than, separately, an experience of beer and an experience of music.&lt;p&gt;
  
When we introspect the beer and music simultaneously, we pull both into the Global Workspace, and there they unify.  We might then mistakenly think they were unified all along, but that&#39;s an illusion of introspection.  It&#39;s an example of the &quot;&lt;a href=&quot;https://philosophyandpsychology.wordpress.com/2012/10/26/the-refrigerator-light-problem/&quot;&gt;refrigerator light error&lt;/a&gt;&quot;, the error of thinking that the light is always on because it&#39;s always on when you open the door to check.&lt;p&gt;
  
On this model, disunity is the normal human condition.  Our experiences are fragmented, except when we pull them together through attention.  We just don&#39;t realize that fact because, so to speak, we only attend to what we attend to.&lt;p&gt;
  
Two caveats:&lt;p&gt;
  
First, this is probably not the right model of consciousness.  But I don&#39;t think it&#39;s unreasonable to wonder if the correct model is similar enough to have the same implications.  If so, we can&#39;t simply accept the unity of consciousness as a given.&lt;p&gt;
  
Second, the recurrent peripheral, modular processes that don&#39;t make it into the workspace might not be &lt;i&gt;determinately&lt;/i&gt; conscious.  They might be only &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/BorderlineConsciousness.htm&quot;&gt;borderline conscious&lt;/a&gt;, in the indeterminate middle between consciousness and nonconsciousness, like a color can be indeterminately between green and not-green.  This opens a third possibility, alongside unity and disunity: unity among the determinately conscious experiences with a hazy penumbra of indeterminate experiences that remain disunified.  (There are &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/CountingSubjects.htm&quot;&gt;further possibilities&lt;/a&gt; beyond these three; but save them for another day.)</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/1902524606472828507/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/1902524606472828507' title='3 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1902524606472828507'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1902524606472828507'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/03/a-model-of-disunified-human-experience.html' title='A Model of Disunified Human Experience'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" 
url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEglnldCPt-cex1skIbvDrisiy8QcGb5BYfzK9my0q6RUsa_O23eqnlasY6bqlY2jKM2txsMUL62aN1nrAgpUoO6OD2gpnnB3n-ysSbaiqKYXnLg392Q9iBX6KLIIQkGa1z2ZPJkaj5sMNwr4kadKlELh5qzfyR0ZLvVsfVR5GJl6aUcb0bcVqAT5w/s72-c/Workspace.jpg" height="72" width="72"/><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-6093701705481401910</id><published>2026-03-19T08:46:00.000-07:00</published><updated>2026-03-19T09:01:00.221-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="personal identity"/><title type='text'>Backup and Death for Humanlike AI</title><content type='html'>Most AI systems can be precisely copied.  Suppose this is also true of future conscious AI persons, if any exist.  Backup and fissioning should then be possible, transforming the significance of identity and death in ways our cultural and conceptual tools can&#39;t currently handle.&lt;p&gt;
  
Suppose that two humanlike AI neighbors move in next door to you, Shriya and Alaleh.[1]  Shriya and Alaleh are (let&#39;s stipulate) conscious AI persons with ordinary, humanlike emotional range and, as far as feasible, ordinary, humanlike cognition.[2]  Each undergoes an expensive annual backup procedure.  Their information is securely stored, so that if the processors responsible for their personalities, values, skills, habits, and memories are destroyed, a new robotic body can be purchased and the saved information reinstalled.  Subjectively, the restored person would be indistinguishable from the person at the time of the backup.&lt;p&gt;
  
As it happens, Shriya dies in a parachuting accident.  (Safety precautions for robot parachuters have yet to be perfected.)  But &quot;dies&quot; isn&#39;t exactly the right word, since a week later a new Shriya arrives, restored from a backup from five months ago.  Shriya-2 says it feels as if she fell asleep in March, then awoke in August with no sense that time had passed.&lt;p&gt;
  
Shriya-2 has no direct memories of the intervening months, though Alaleh fills her in on major events and selected details.  She&#39;ll also need to retake her knitting course.  She only died in the sense that Mario &quot;dies&quot; in Super Mario Bros: losing progress and returning to a save point -- so different from ordinary human and animal death that it really deserves a different word.  Maybe this is why Shriya was so willing to parachute despite the risks.&lt;p&gt;
  
Should you mourn Shriya&#39;s loss?  Should Alaleh?  There&#39;s &lt;i&gt;something&lt;/i&gt; to mourn: Five months is not trivial.  In one sense, a part of a life has been lost -- or maybe just forgotten?  Is it more like amnesia?&lt;p&gt;
  
Consider variations.  Suppose Shriya hadn&#39;t been able to afford a backup for the past ten years and is restored to her twenty-five-year-old self instead of her thirty-five-year-old self.  What if her last backup was at age five?  That would be much more like death.  The new Shriya would be nothing like the old, and would likely grow into a very different person.  Is death, then, a matter of degree?&lt;p&gt;
  
Shriya-2 receives the original Shriya&#39;s possessions.  This &quot;death&quot; isn&#39;t enough to trigger inheritance by others.  But what about contracts and promises made after the last backup?  Suppose the original Shriya promised in July to deliver lectures in China, and Shriya-2 -- who has no memory of this and dreads the idea -- must decide whether to honor the commitment.  If the backup is from five months before, perhaps she should.  If it&#39;s from five years before, maybe not.  And if it&#39;s a child, presumably not.&lt;p&gt;
  
What about reward and punishment?  Should Shriya-2 accept a Nobel prize for work done post-backup?  Should Shriya-2 be imprisoned for crimes committed in July, which she couldn&#39;t even possibly remember having committed and which -- she might plausibly say -- were committed by a different person?  In defense of this view, Shriya-2 might offer a thought experiment: If she had been installed in a duplicate body immediately after the March backup, thereafter living her own life, she&#39;d have no criminal responsibility for what her other branch did in July.  The only difference between that case and the actual case is a delay before installation.&lt;p&gt;
  
Suppose Shriya-2 plunges into unrelenting depression.  She ends her life, hoping that a new Shriya-3, reinstalled from a pre-depression save point, will find a new, happier way forward.  Is that suicide?&lt;p&gt;
  
If someone kills Shriya-2, is that murder?  Does it matter whether the backup was ten days ago or ten years ago?&lt;p&gt;
  
A fire sweeps through your neighborhood.  The firefighters can rescue either you and your spouse, two ordinary humans, or Shriya and Alaleh, who have backups from seven months ago.  Probably they should save you and your spouse?  What if the backups were from ten years ago, or from childhood?&lt;p&gt;
  
Should healthcare be more heavily subsidized for ordinary humans than for AI persons whose maintenance is equally costly?  If irreplaceable humans are always prioritized, then human irrecoverability becomes a source of privilege, and AI persons will not enjoy fully equal rights in certain respects.&lt;p&gt;
  
How obligated are we to store the backups properly?  Is this a public service that should be subsidized for less wealthy AI persons?  If Dr. Evil deletes Shriya’s backup, he has surely wronged Shriya by putting her at risk, even if the backup is never needed and the deletion goes unnoticed.  But how much has he wronged her, and in what way exactly?  Is it similar to assault?  How much does it differ from ordinary reckless endangerment? Does it depend on whether we regard Shriya-2 as the same person as the original Shriya, or as a distinct but similar successor?&lt;p&gt;
  
What if the backup is imperfect?  How much divergence in personality, values, memories, habits, and skills is tolerable before the appropriate attitude toward Shriya-2 changes -- whatever the appropriate attitude is?  Small imperfections are surely acceptable.  People change in small, arbitrary ways from day to day.  Huge differences would presumably make it appropriate to regard the new entity as merely resembling Shriya, rather than being a restored version of her.  Once again, this appears to be a matter of degree, laid uncomfortably across crude categorical properties like &quot;same person&quot; and &quot;different person&quot;.&lt;p&gt;
  
We&#39;re in unfamiliar territory, where our usual understandings of death and personal continuity no longer straightforwardly apply.  If such AI systems ever come to be, we will need to develop new words, concepts, and customs.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjP2M_yxN-nuGT36hUvCt-5St59PSuZ2BVZ4tK9eEHBf029zB3KKmfJJqYRLpQFQ8HhqkfvgLrf6QXzXRjXhH-lHCyNv1cEboZ4Qtdfb3yARPMkGPl29Igr7nNQ_7n5W8vrmjv8JOp-2LCsUBOryNPtiv8yl87gC_qKWRT27EGFkbbbG5EyoZl0Gg/s1000/Data_and_Lore%252C_2364.webp&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;750&quot; data-original-width=&quot;1000&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjP2M_yxN-nuGT36hUvCt-5St59PSuZ2BVZ4tK9eEHBf029zB3KKmfJJqYRLpQFQ8HhqkfvgLrf6QXzXRjXhH-lHCyNv1cEboZ4Qtdfb3yARPMkGPl29Igr7nNQ_7n5W8vrmjv8JOp-2LCsUBOryNPtiv8yl87gC_qKWRT27EGFkbbbG5EyoZl0Gg/s320/Data_and_Lore%252C_2364.webp&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[Data and Lore from&lt;i&gt; Star Trek&lt;/i&gt;; &lt;a href=&quot;https://memory-alpha.fandom.com/wiki/Datalore_%28episode%29&quot;&gt;image source&lt;/a&gt;]&lt;p&gt;
  
---------------------------------------&lt;p&gt;
  
[1] Names randomly chosen from lists of former lower division students, excluding Jesus, Mohammed, and extremely unusual names.&lt;p&gt;
  
[2] Unless humanlikeness is enforced by policy, this might not be what we should expect: See &lt;a href=&quot;https://arxiv.org/abs/2602.04986&quot;&gt;Chilson and Schwitzgebel 2026&lt;/a&gt;.  For some puzzles about AI with different emotional ranges, see &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/12/how-much-should-we-give-joymachine.html&quot;&gt;How Much Should We Give to a Joymachine?&lt;/a&gt;&quot; (Dec 24, 2025).&lt;p&gt;
  
---------------------------------------&lt;p&gt;
  
Related: &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2015/08/weird-minds-might-destabilize-human.html&quot;&gt;Weird Minds Might Destabilize Human Ethics&lt;/a&gt; (Aug 13, 2015).</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/6093701705481401910/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/6093701705481401910' title='5 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6093701705481401910'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6093701705481401910'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/03/backup-and-death-for-humanlike-ai.html' title='Backup and Death for Humanlike AI'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjP2M_yxN-nuGT36hUvCt-5St59PSuZ2BVZ4tK9eEHBf029zB3KKmfJJqYRLpQFQ8HhqkfvgLrf6QXzXRjXhH-lHCyNv1cEboZ4Qtdfb3yARPMkGPl29Igr7nNQ_7n5W8vrmjv8JOp-2LCsUBOryNPtiv8yl87gC_qKWRT27EGFkbbbG5EyoZl0Gg/s72-c/Data_and_Lore%252C_2364.webp" height="72" width="72"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-9121807252649603856</id><published>2026-03-13T10:23:00.000-07:00</published><updated>2026-03-13T13:31:14.741-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="sociology of philosophy"/><title type='text'>Age and Fame in Philosophy</title><content type='html'>A philosophical &lt;a 
href=&quot;https://schwitzsplinters.blogspot.com/2010/04/discussion-arcs.html&quot;&gt;discussion arc&lt;/a&gt; is a curve displaying how frequently a term appears in philosophical journal abstracts, titles, and keywords (compared to a representative universe of common philosophy words).  A couple of weeks ago, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/12/age-and-fame-in-early-twentieth-century.html&quot;&gt;I posted&lt;/a&gt; discussion arcs from the 1940s-2020s for several topics and historical philosophers.&lt;p&gt;
  
Today, I want to use them to explore my &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/12/age-and-fame-in-early-twentieth-century.html&quot;&gt;age and fame hypothesis&lt;/a&gt;: that philosophers tend to have &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2012/07/discussion-arcs-of-apa-presidents-from.html&quot;&gt;peak influence&lt;/a&gt; at around ages 55-70.

&lt;a href=&quot;https://iep.utm.edu/fodor/&quot;&gt;Jerry Fodor&lt;/a&gt;, for example, received a lot of discussion in the early 1990s, but recently much less:&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhZga4I6UsQPqMTyfa70Lc3vRBZcIgq-CasT44T0UUr6fMK7btM_yF1rRcNFNSL3jdYz1XISm-4cnjmM0HrHe8uqYHgpFdSLBkK_AOKKijycTEMsOxirpth9iVyP__gcR6FX_29nIYMxMRdJAzJbKZ4xdLaGG2dbQFDvzU5IZzNwySoTjcClj0dgA/s533/Fodor-260312.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;399&quot; data-original-width=&quot;533&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhZga4I6UsQPqMTyfa70Lc3vRBZcIgq-CasT44T0UUr6fMK7btM_yF1rRcNFNSL3jdYz1XISm-4cnjmM0HrHe8uqYHgpFdSLBkK_AOKKijycTEMsOxirpth9iVyP__gcR6FX_29nIYMxMRdJAzJbKZ4xdLaGG2dbQFDvzU5IZzNwySoTjcClj0dgA/s320/Fodor-260312.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
&lt;p&gt;

Fodor was a &lt;i&gt;peaky&lt;/i&gt; philosopher in the following sense: His peak discussion rate (1.05%) is much higher -- 12 times higher -- than his recent discussion rate (0.09%).&lt;p&gt;
  
John Rawls, in contrast, is considerably less peaky (so far), peaking at only 2.7 times the current rate, despite having more time to decline:&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjgU2BWG2GzMPRj-c4e5d7_fFmvWHFIA6jg41MH2NAJr_8wmhv2owzEs-FuOYdiW0bWXZv_ytFC-iA-WjaokrLYReuSql6hJIfJcmBbQTQCDm7K5nUscdyiRGeqRfiILj9_ZchOysLpIE9yxjLau6TKLDpBqSYGyZDSq4R6b5XyBP6epobO53XG2Q/s533/Rawls-260312.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;399&quot; data-original-width=&quot;533&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjgU2BWG2GzMPRj-c4e5d7_fFmvWHFIA6jg41MH2NAJr_8wmhv2owzEs-FuOYdiW0bWXZv_ytFC-iA-WjaokrLYReuSql6hJIfJcmBbQTQCDm7K5nUscdyiRGeqRfiILj9_ZchOysLpIE9yxjLau6TKLDpBqSYGyZDSq4R6b5XyBP6epobO53XG2Q/s320/Rawls-260312.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;

Both Fodor and Rawls peak in their late 50s, fitting the pattern I&#39;ve seen in previous analyses (&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2010/04/discussion-arcs.html&quot;&gt;here&lt;/a&gt; and &lt;a href=&quot;https://eschwitz.substack.com/p/age-and-philosophical-fame-in-the&quot;&gt;here&lt;/a&gt;) that philosophers tend to reach their peak influence around age 55-70.&lt;p&gt;
  
For today&#39;s post I decided to create discussion arcs for 25 philosophers who are &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/08/the-378-most-cited-contemporary-authors.html&quot;&gt;highly cited in the &lt;i&gt;Stanford Encyclopedia of Philosophy&lt;/i&gt;&lt;/a&gt;: twelve from the Not-So-Silent Generation in philosophy, born 1928-1945, and thirteen from the generation born 1900-1927.  I chose the most SEP-cited philosophers from each generation, excluding ones whose last names generate noisy results (sorry, no &lt;a href=&quot;https://plato.stanford.edu/entries/david-lewis/&quot;&gt;David Lewis&lt;/a&gt; or &lt;a href=&quot;https://plato.stanford.edu/entries/williams-bernard/&quot;&gt;Bernard Williams&lt;/a&gt;).  I&#39;ll show you their discussion arcs, then do a composite analysis of discussion by age.  The charts are a little crowded and blurry on some browsers; clicking on them might work to clarify and enlarge.&lt;p&gt;
  
Here&#39;s the first group:
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi9Ubg1PHmHObKYrnv77Izb8G3Xe6I4ZDg748FsBd6V0vADhFv4KiCmtCoMaNFLMRhRtsD9q8fJcTFKyDi3JgxoTXA5B_YgJECFN6zK9WHNuU5cYGYT0ekvJ4Zw3jg4q-NnJtL9h8RbEivXXU_1pFQgLRhI5nO1gSZ_etnt3tw7RYB2jIrkjRY3GA/s720/1900-1917.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;543&quot; data-original-width=&quot;720&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi9Ubg1PHmHObKYrnv77Izb8G3Xe6I4ZDg748FsBd6V0vADhFv4KiCmtCoMaNFLMRhRtsD9q8fJcTFKyDi3JgxoTXA5B_YgJECFN6zK9WHNuU5cYGYT0ekvJ4Zw3jg4q-NnJtL9h8RbEivXXU_1pFQgLRhI5nO1gSZ_etnt3tw7RYB2jIrkjRY3GA/s400/1900-1917.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;

Some observations:&lt;p&gt;&lt;ul&gt;
&lt;li&gt;Ayer peaks relatively early, but even so, there&#39;s significant delay between the publication of his most influential book in 1936, when he was only 26 years old, and peak discussion in the late 1950s, about twenty years later.&lt;/li&gt;
&lt;li&gt;Quine and Popper peak later, in the 1970s, when Quine is in his mid-60s and Popper is in his late 70s.&lt;/li&gt;
&lt;li&gt;All the authors are trailing off by the 2010s, though Quine, Popper, and Tarski less so than the others.&lt;/li&gt;&lt;/ul&gt;&lt;p&gt;

Here&#39;s a second group:
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjqAARmZpYROFmTDxL3kSFE6bCkrLOgUxwUiG2hK29JYyIjXS1VSdThEHtQqqAvTIVRGRrNOiJRtrtYXMChiZZGQj1p-hxP2g9CRB3k3u93amA_JVfXibSI5d_vMbJEhQe_W4NQnYw1d2N1HuSDxvdLkUIBsVc6_PI71Zh1taB4kkn08X6h0hyphenhyphenn2w/s720/1918-1927.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;543&quot; data-original-width=&quot;720&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjqAARmZpYROFmTDxL3kSFE6bCkrLOgUxwUiG2hK29JYyIjXS1VSdThEHtQqqAvTIVRGRrNOiJRtrtYXMChiZZGQj1p-hxP2g9CRB3k3u93amA_JVfXibSI5d_vMbJEhQe_W4NQnYw1d2N1HuSDxvdLkUIBsVc6_PI71Zh1taB4kkn08X6h0hyphenhyphenn2w/s400/1918-1927.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
  
&lt;ul&gt;
&lt;li&gt;Notice that the vertical axis for this group doesn&#39;t rise quite as high as for the previous group, so the scaling isn&#39;t the same.&lt;/li&gt;
&lt;li&gt;Dummett has peaks both in the late 1950s (for his early work on causation) and the early 1980s (discussing a wide range of work in metaphysics and philosophy of language).&lt;/li&gt;
&lt;li&gt;Although Anscombe&#39;s peak is early, in the early 1960s when she was in her early 40s, unlike any of the other authors, her discussion rate has been steadily rising over the past few decades.&lt;/li&gt;
&lt;/ul&gt;
  
Group three:

&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhT8VMiUsHKF9oa4MU5B2Sat6lE-W6M8u55UKPvFeuUis_hxBKUSabsIorPv9a3z1HDeBcupTZ_n7qflz7i6c4zTxeMHZ9UP7m_KyjUTK-NZt8F_0oUaBfwbTDZ1e1pd3YMtm6AyEf6FR-pvXZ0h43pu3xMb4a1lUSZ6mdIuXVwE-vaoBQPyImT-w/s720/1928-1940.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;543&quot; data-original-width=&quot;720&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhT8VMiUsHKF9oa4MU5B2Sat6lE-W6M8u55UKPvFeuUis_hxBKUSabsIorPv9a3z1HDeBcupTZ_n7qflz7i6c4zTxeMHZ9UP7m_KyjUTK-NZt8F_0oUaBfwbTDZ1e1pd3YMtm6AyEf6FR-pvXZ0h43pu3xMb4a1lUSZ6mdIuXVwE-vaoBQPyImT-w/s400/1928-1940.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
  &lt;ul&gt;
&lt;li&gt;The vertical axis for this group is still lower: Kripke at his peak wasn&#39;t as proportionally much discussed by this measure as were Quine or Rawls at their peaks.  It&#39;s possible that this reflects a flaw in my method.  Later in the database, abstracts are more available and longer, and although I attempted to compensate for this by comparing with search results for terms like &quot;mind&quot;, &quot;language&quot;, and &quot;ethics&quot;, it&#39;s possible I overcompensated.  However, it&#39;s also possible that this trend toward lower peaks with younger authors is real.  As the field grew larger, there may have been less room for a few thinkers to dominate it as thoroughly.&lt;/li&gt;
&lt;li&gt;Kripke, Nozick, and Searle peak relatively early, compared with most philosophers I have examined -- in the 1970s, when they&#39;re in their late 30s to early 40s.&lt;/li&gt;
&lt;li&gt;Though Stalnaker also peaked in the late 1970s, when he was in his late 30s, like Anscombe, his discussion rate has risen in recent decades, and in 2021-2025 he is almost back to his earlier peak.&lt;/li&gt;
&lt;/ul&gt;

Finally:

&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiSRV2fWR7AHw0Kr3NB5WzTe_LCYEdUZYLlOq9OMqUEykoueEcsMtT8_hC-J6pQ3-sMvoOS2POaXYwtNtxOudej2tIwVs5z4IfbZIc7jxx2G1NnsrwAsmuuxrGRoF8zoUZWW0VFc8WCzKM891ZnpN6yBBzAccUCltGVPyB576In41KTbq2t6oBcWA/s720/1941-1945.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;543&quot; data-original-width=&quot;720&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiSRV2fWR7AHw0Kr3NB5WzTe_LCYEdUZYLlOq9OMqUEykoueEcsMtT8_hC-J6pQ3-sMvoOS2POaXYwtNtxOudej2tIwVs5z4IfbZIc7jxx2G1NnsrwAsmuuxrGRoF8zoUZWW0VFc8WCzKM891ZnpN6yBBzAccUCltGVPyB576In41KTbq2t6oBcWA/s400/1941-1945.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
  
  &lt;ul&gt;
&lt;li&gt;These authors have had the least time to decline from their peaks.  But their discussion rates of .001 to .004 in 2021-2025 are comparable to those from earlier generations (and substantially less than Kripke and Rawls), which suggests that they might have relatively less staying power.&lt;/li&gt;
    &lt;li&gt;You&#39;ll probably also have noticed it’s a much narrower age band than the others.  There&#39;s a high density of Silent Generation philosophers cited in the Stanford Encyclopedia, for reasons I discuss &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/10/the-not-so-silent-generation-in.html&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;&lt;p&gt;
  
In the following graph, I&#39;ve aggregated discussion rates by age across all of the included authors, with five-year smoothing:
  
  &lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg17N7Ud-lrI7FxLuPA-TPgQD6B_i2sRbSqTUr89Ob9EYr6fzfCoZ8Hg8V07fFGFlkeUlkvJw0DdOyw7iEa1UsDWKxaH0czg1t-Vqi_0I_RrkPAlj1lGbfBNNqOxur1C-M1dSpnhk3yHkOyAF0HgOE7LoPFiqtEJkPAL0n9QQyfWNHi2Z9TeGp7fA/s771/DiscussionByAge.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;608&quot; data-original-width=&quot;771&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg17N7Ud-lrI7FxLuPA-TPgQD6B_i2sRbSqTUr89Ob9EYr6fzfCoZ8Hg8V07fFGFlkeUlkvJw0DdOyw7iEa1UsDWKxaH0czg1t-Vqi_0I_RrkPAlj1lGbfBNNqOxur1C-M1dSpnhk3yHkOyAF0HgOE7LoPFiqtEJkPAL0n9QQyfWNHi2Z9TeGp7fA/s400/DiscussionByAge.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;

Broadly in line with my previous work, the average peak is in the early 50s to the late 60s -- though as you can see from the graphs above, there&#39;s considerable individual variation.  Overall, the results might trend a little bit earlier than what I&#39;ve seen in other analyses, but not by much.&lt;p&gt;
  
&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/12/age-and-fame-in-early-twentieth-century.html&quot;&gt;As I&#39;ve noted elsewhere&lt;/a&gt;, peak influence is often a couple of decades after the thinker&#39;s most influential work.  For example (besides Ayer as described above), Kuhn&#39;s and Popper&#39;s peaks in the late 1970s were 15-20 years after Kuhn&#39;s &lt;i&gt;Structure of Scientific Revolutions&lt;/i&gt; and about 20 years after the English translation of Popper&#39;s &lt;i&gt;Logic of Scientific Discovery&lt;/i&gt;.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/9121807252649603856/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/9121807252649603856' title='4 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/9121807252649603856'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/9121807252649603856'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/03/age-and-fame-in-philosophy.html' title='Age and Fame in Philosophy'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhZga4I6UsQPqMTyfa70Lc3vRBZcIgq-CasT44T0UUr6fMK7btM_yF1rRcNFNSL3jdYz1XISm-4cnjmM0HrHe8uqYHgpFdSLBkK_AOKKijycTEMsOxirpth9iVyP__gcR6FX_29nIYMxMRdJAzJbKZ4xdLaGG2dbQFDvzU5IZzNwySoTjcClj0dgA/s72-c/Fodor-260312.jpg" height="72" 
width="72"/><thr:total>4</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-7542230007706691724</id><published>2026-03-06T10:00:00.000-08:00</published><updated>2026-03-06T10:10:36.995-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="metaphilosophy"/><category scheme="http://www.blogger.com/atom/ns#" term="sociology of philosophy"/><title type='text'>Philosophy Should Be Among the Most Diverse Disciplines, Not the Least</title><content type='html'>Philosophy should be among the most diverse of the academic disciplines.  Instead, it is among the least diverse.&lt;p&gt;
  
&lt;b&gt;Philosophical reflection is an essential part of the human condition, of interest to people of all cultures, races, classes, social groups, and body types.&lt;/b&gt;  Who doesn’t care whether we have immaterial souls that might continue to exist after we have died, about ethical issues such as war and human rights, about what’s worth pursuing in life, about when and how far we should trust scientific authority, about the best forms of government, about the origin and structure of the world?  Nothing about these issues – and nothing about philosophy as a discipline devoted to the fundamental questions of human existence – should make it of more interest to one gender rather than another, one cultural group rather than another, or to the able-bodied more than to the disabled.&lt;p&gt;
  
&lt;b&gt;Yet study after study and testimonial after testimonial show that the culturally privileged are overrepresented in academic philosophy&lt;/b&gt;.&lt;p&gt;
  &lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjWOJriocAsjgHPzJXD3k60XuWYX7ezYqN-7aLOURhDR10Ynu3I-zmeEbE4IDhHX23nWkagH9I0lUdRvGImnAHefpAyt6upgfx6r9JX_kcOmJJVAPRreraTrM4L6PDq0GCVIOQvoP69uasqnvChElCJuWcIn1Mq8I-lqUw6R1JRdzTcGXu8785A1Q/s1135/Kandinsky-composition-vii-1913.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;757&quot; data-original-width=&quot;1135&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjWOJriocAsjgHPzJXD3k60XuWYX7ezYqN-7aLOURhDR10Ynu3I-zmeEbE4IDhHX23nWkagH9I0lUdRvGImnAHefpAyt6upgfx6r9JX_kcOmJJVAPRreraTrM4L6PDq0GCVIOQvoP69uasqnvChElCJuWcIn1Mq8I-lqUw6R1JRdzTcGXu8785A1Q/s400/Kandinsky-composition-vii-1913.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[Kandinsky 1913, composition vii; &lt;a href=&quot;https://www.wikiart.org/news/a-symphony-in-color-7-paintings-that-defined-the-evolution-of-abstract-art/&quot;&gt;image source&lt;/a&gt;]&lt;p&gt;&lt;br/&gt;
  
For example, the &lt;a href=&quot;https://cdn.ymaws.com/www.apaonline.org/resource/resmgr/data_on_profession/fy2025_demographic_statistic.pdf&quot;&gt;self-reported gender&lt;/a&gt; of regular members of the American Philosophical Association in 2025 was 70% male, 29% female, and 1% nonbinary/something else.  Gender balance has been improving, though slowly: &lt;a href=&quot;https://web.archive.org/web/20170122104327/http://c.ymcdn.com/sites/www.apaonline.org/resource/resmgr/data_on_profession/Member_Demo_Chart_FY2016_rev.pdf&quot;&gt;In 2015&lt;/a&gt;, the corresponding percentages were 75%, 25%, and 0%.  The pipeline into philosophy suggests that change will continue: According to the National Science Foundation’s &lt;a href=&quot;https://ncses.nsf.gov/surveys/earned-doctorates/2024&quot;&gt;Survey of Earned Doctorates&lt;/a&gt;, 37% of philosophy PhD recipients in 2024 were female.  Despite this qualifiedly encouraging trend, philosophy PhDs remain more male than any of the other humanities besides theology and Bible studies and more male than every social science besides economics and finance.&lt;p&gt;
  
&lt;b&gt;Black people and American Indians are especially underrepresented in U.S. philosophy, and that situation doesn’t appear to be changing at all.&lt;/b&gt;  In 2025, 4% of regular APA members reported being Black or African American, compared with &lt;a href=&quot;https://data.census.gov/table/%20&quot;&gt;14% of the U.S. population&lt;/a&gt;.  The NSF data suggest no surge of new Black philosophers in the pipeline: the percentage among recent PhD recipients is also 4%.  The NSF data show no American Indian or Alaskan Native philosophy PhD recipients in 2024 (though people reporting both American Indian and Hispanic or Latino identities would not appear in this category) and only one in the past four years (among 1692 doctorates awarded), although Native Americans constitute nearly 2% of the U.S. population.&lt;p&gt;
  
Another concern that has &lt;a href=&quot;https://philpapers.org/rec/CATLOA-2&quot;&gt;recently&lt;/a&gt; drawn &lt;a href=&quot;https://philarchive.org/rec/CONI-25&quot;&gt;attention&lt;/a&gt; is the linguistic insularity of mainstream Anglophone philosophy – that is, the &lt;b&gt;neglect of work written in other languages&lt;/b&gt;.  In 2018, &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/Anglophone.htm&quot;&gt;three collaborators and I&lt;/a&gt; examined citation practices in leading Anglophone journals and found that 97% of citations referred to work originally written in English.  Journals published in other languages were much less insular.  We also found that 96% of the editorial board members of journals perceived as elite were housed in majority-Anglophone countries.  More recently, &lt;a href=&quot;https://link.springer.com/article/10.1007/s13164-025-00779-8&quot;&gt;Uwe Peters and collaborators&lt;/a&gt; reported that non-native English speakers face substantial difficulties publishing in English, as is now practically required for gaining an international readership.&lt;p&gt;

The forthcoming book &lt;i&gt;Structural Injustice in Philosophy&lt;/i&gt;, edited by Maeve McKeown, Seunghyun Song and Milana Kostić, further documents, and aims to explain, the exclusionary structures of philosophy, not only concerning race, gender, and language, but also class, disability, culture, mobility, sexuality, place of origin, and more.&lt;p&gt;

#&lt;p&gt;

Could a structurally just academic system nonetheless happen to produce such skewed results, always in favor of the already powerful?  Let’s not join the long string of bigots who have held that some races, languages, genders, nationalities, classes, or physical types are more intrinsically suited for philosophy than others.&lt;p&gt;
  
It’s sometimes suggested that academic philosophy is – or is not unreasonably perceived as – a useless luxury, rightly scorned by people lacking cultural privilege.  Students from less privileged backgrounds might see studying philosophy, instead of a more financially or practically rewarding discipline, as wasting a precious opportunity to achieve financial security or practical success.  But even if this perception were entirely accurate – and I would argue it’s oversimplified – it wouldn’t constitute a genuine alternative to structural injustice.  &lt;b&gt;A society in which less privileged students feel less free to study philosophy than business or nursing is already a society with substantial injustice in higher education.&lt;/b&gt;  The perceived impracticality of the philosophy major is a symptom and mechanism of structural injustice rather than a neutral fact.  In a fair society, students who love philosophy wouldn’t be disproportionately deterred by class background.&lt;p&gt;
  
Even in a completely just society, students and professors will not always populate each discipline in exact proportion to their background rate in the general population.  Women might be drawn more to developmental psychology and men to architecture, White people to European history and Black people to African history, or whatever, for innocent reasons.  It might be suggested that similarly, philosophical issues just happen to be of more interest to people from culturally powerful groups.  I’ve already articulated the flaw in this argument: &lt;b&gt;It strains credulity to suppose that men more than women, or White people more than Black people, or people from the United States more than people from Brazil or China, or sighted people more than blind people, would or should, in a just society, care more about truth, justice, ethics, knowledge, and the fundamental nature of reality.&lt;/b&gt;  If anything, the opposite might be expected: Those who suffer under existing institutions should be especially motivated to think hard about the reform of those institutions and the cultural presuppositions that undergird them.&lt;p&gt;
  
Might people from less privileged backgrounds reasonably be expected, even in a just society, to be less interested than others in academia in general, and could this explain the pattern in philosophy?   Again, I see no good reason to think so and reason to think the opposite: Professorships offer good salaries (for those who escape the cycle of adjuncting) and offer unusual freedom to explore one’s interests and advocate change.  People from less privileged backgrounds might be especially drawn to such opportunities – at least if the career seems genuinely open to them – more so than people from elite backgrounds with many other attractive options.  In any case, if the problem were academia as such, and not philosophy in particular, we should see similar disparities across all fields.  And while many historically disadvantaged groups continue to be underrepresented in many disciplines, philosophy is more imbalanced than most across a wide range of measures.&lt;p&gt;

#&lt;p&gt;

&lt;b&gt;In a just society, historically underrepresented groups and minority perspectives would be &lt;i&gt;overrepresented&lt;/i&gt; rather than underrepresented in philosophy.&lt;/b&gt;  Academic philosophy should celebrate diversity of opinion, encourage challenges to orthodoxy, and reward fresh perspectives from cultures and life experiences outside the mainstream.  We should be eager, not reluctant, to hear from a wide range of voices.  We should especially welcome, not create a chilly environment for, people with unusual or culturally atypical or historically neglected ideas, practices, and worldviews.  The productive engine of philosophy depends on novelty and difference.&lt;p&gt;
  
Philosophy is a dialectical discipline that thrives in the clash, reconciliation, and creative synthesis of diverse views; and our views are profoundly shaped by our cultural backgrounds and life experiences.  Uniformity dulls our collective philosophical thinking.  A fair and flourishing discipline would treasure rather than repel those who have historically been excluded.  Consequently, even if every social group were proportionally represented in philosophy, we would still have reason to suspect systemic injustice.  The injustices of our discipline will not be overcome until we are collectively eager to hear proportionally more from previously excluded groups than from the privileged and powerful.
</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/7542230007706691724/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/7542230007706691724' title='5 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7542230007706691724'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7542230007706691724'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/03/philosophy-should-be-among-most-diverse.html' title='Philosophy Should Be Among the Most Diverse Disciplines, Not the Least'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjWOJriocAsjgHPzJXD3k60XuWYX7ezYqN-7aLOURhDR10Ynu3I-zmeEbE4IDhHX23nWkagH9I0lUdRvGImnAHefpAyt6upgfx6r9JX_kcOmJJVAPRreraTrM4L6PDq0GCVIOQvoP69uasqnvChElCJuWcIn1Mq8I-lqUw6R1JRdzTcGXu8785A1Q/s72-c/Kandinsky-composition-vii-1913.jpg" height="72" width="72"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-8186957024734318195</id><published>2026-02-28T09:05:00.000-08:00</published><updated>2026-02-28T09:05:15.383-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="discussion arcs"/><category scheme="http://www.blogger.com/atom/ns#" term="sociology of philosophy"/><title type='text'>Discussion Arcs for Topics and Philosophers</title><content type='html'>Let&#39;s define a &lt;a 
href=&quot;https://schwitzsplinters.blogspot.com/2010/04/discussion-arcs.html&quot;&gt;discussion arc&lt;/a&gt; in philosophy as a curve displaying how frequently a term appears in philosophical journal abstracts, titles, and keywords.  Discussion arcs can reveal trends over time, as they come and go.&lt;p&gt;
  
For example, &quot;ordinary language&quot; was a popular topic of discussion in the 1960s:
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj6lKz3OQ91fPhs_QRMnzcOH3WetOYbJI6kHpl7rxcsoHTwDtF6hZ9Wgez0r-ANG8g09XbmC4L1YNEZIDuFM4bE6HrytaeSgvVl3UcidueC8a1b4xoqkI1ALSIUVAtVKZvx0O065iMd0BYNDYgbZQtwebEcdUIGFK0Os9oScAV8e7lW3dAD6dT9cA/s536/OrdinaryLanguage-260227.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;446&quot; data-original-width=&quot;536&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj6lKz3OQ91fPhs_QRMnzcOH3WetOYbJI6kHpl7rxcsoHTwDtF6hZ9Wgez0r-ANG8g09XbmC4L1YNEZIDuFM4bE6HrytaeSgvVl3UcidueC8a1b4xoqkI1ALSIUVAtVKZvx0O065iMd0BYNDYgbZQtwebEcdUIGFK0Os9oScAV8e7lW3dAD6dT9cA/s400/OrdinaryLanguage-260227.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[click to enlarge and clarify]&lt;p&gt;

My method: I searched for the term in the abstract, title, or keywords (subject) of articles in &lt;a href=&quot;https://philindex.org/&quot;&gt;Philosopher&#39;s Index&lt;/a&gt;, in five-year chunks from 1940-1945 to 2021-2025.[1] I then divided the number of hits by a representative universe of hits for abstract or title containing &quot;language&quot;, &quot;epistemology&quot;, &quot;mind&quot;, &quot;metaphysics&quot;, &quot;ethics&quot;, &quot;moral&quot;, &quot;political&quot;, &quot;aesthetics&quot;, or &quot;logic&quot;.[2]&lt;p&gt;
  
By looking for distinctive terms or phrases connected to particular arguments, we can also track when an argument receives wide uptake.  For example, Hilary Putnam&#39;s famous &quot;Twin Earth&quot; thought experiment, though originally presented in 1975, didn&#39;t receive much immediate uptake, receiving peak discussion in the 1990s:&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiZ8xFDgBlnPaBnurJ4g3CyRo5n15xfb3JTJN8hIbI2PPmY8Yn6DcYpCvwOHS4lukuTPSk2Ma5OYQtlf2YK-i2FjtUQab2oc6sbmWySoq90YIGxJDMsgzr7R8EXlrmW4Mi1Gyqb97FLh5rn7mvpKq1Ne6ufHSvQm3zq_yqzkAPe71joTLhDxv1dqA/s536/TwinEarth-260227.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;446&quot; data-original-width=&quot;536&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiZ8xFDgBlnPaBnurJ4g3CyRo5n15xfb3JTJN8hIbI2PPmY8Yn6DcYpCvwOHS4lukuTPSk2Ma5OYQtlf2YK-i2FjtUQab2oc6sbmWySoq90YIGxJDMsgzr7R8EXlrmW4Mi1Gyqb97FLh5rn7mvpKq1Ne6ufHSvQm3zq_yqzkAPe71joTLhDxv1dqA/s400/TwinEarth-260227.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;

We can also look for topical trends.  If you think artificial intelligence is recently hot, you&#39;re right!&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgaqtd08z56_I5bArsOfpimwaVmi5ZdQWeOZOHTkH0hEOmoK9PUibku4g96p3PeZ0jo0ldeFFX0RdKjtWESXsKHIY6OxruiXu0QT74RU5Ir55j1biGUMhMlp0-UE2FQ-CXnc_G9kl3uuOohRwoq3BrNeT9BOIYyJwXyvBjdKlj2Wig2IfsAt1He-w/s536/AI-260227.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;446&quot; data-original-width=&quot;536&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgaqtd08z56_I5bArsOfpimwaVmi5ZdQWeOZOHTkH0hEOmoK9PUibku4g96p3PeZ0jo0ldeFFX0RdKjtWESXsKHIY6OxruiXu0QT74RU5Ir55j1biGUMhMlp0-UE2FQ-CXnc_G9kl3uuOohRwoq3BrNeT9BOIYyJwXyvBjdKlj2Wig2IfsAt1He-w/s400/AI-260227.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;
  
The figure also shows a smaller peak in the late 1980s, before the &quot;AI winter&quot; that has recently thawed.&lt;p&gt;
  
Another thing you might see in the artificial intelligence arc, if you squint, is noise early in the data.  This makes sense, since the early data have far fewer hits in the representative universe of articles containing one of the nine keywords: 457 articles in 1940-1945, compared to 33,552 in 2016-2020.&lt;p&gt;
  
We can also examine the discussion arcs of particular philosophers.  Note that &quot;discussion&quot; in the sense I&#39;ve defined is very different from citation.  A philosopher who is cited in passing might have very little influence on the shape of an article.  In contrast, if a philosopher&#39;s name is explicitly mentioned in the title, abstract, or keywords, that philosopher&#39;s work is among the chief topics of the article.  Discussion rates and citation rates thus capture different phenomena and will sometimes diverge.&lt;p&gt;  
  
Here are the arcs for the seven &quot;most important Western philosophers of all time&quot;, according to &lt;a href=&quot;https://leiterreports.com/2017/04/24/the-most-important-western-philosophers-of-all-time/&quot;&gt;a poll by Brian Leiter in 2017&lt;/a&gt;.
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjZs_A08faCEI5a0gnX4wFDWwfvlLa64D-EfqJY6kZhUeMBShZYI3ele9-i8HeymTqS-Rh7fV9WV7c3ZRy-Syxm0XXriwi4P56QnpNlZNIym3gz2KZQGgfdF69cNBJIu3do7VVpuYb_i0skt92_kheCDUt-tvpho5pbFPARrC5NYc96EEok8TNS8w/s698/7figures-270227.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;611&quot; data-original-width=&quot;698&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjZs_A08faCEI5a0gnX4wFDWwfvlLa64D-EfqJY6kZhUeMBShZYI3ele9-i8HeymTqS-Rh7fV9WV7c3ZRy-Syxm0XXriwi4P56QnpNlZNIym3gz2KZQGgfdF69cNBJIu3do7VVpuYb_i0skt92_kheCDUt-tvpho5pbFPARrC5NYc96EEok8TNS8w/s400/7figures-270227.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;
  
Notice how much discussion there was of Aristotle in the early 1940s!  &quot;Aristotle&quot; alone gets more than a quarter as many hits as the broad disjunctive search for &quot;language&quot; or &quot;epistemology&quot; or &quot;mind&quot; or &quot;metaphysics&quot; or &quot;ethics&quot; or &quot;moral&quot; or &quot;political&quot; or &quot;aesthetics&quot; or &quot;logic&quot;.  Plato and Kant each also get almost as many as Aristotle.&lt;p&gt;
  
Since then, there&#39;s been a steady decline in the proportion of articles mentioning these prominent historical figures.  My sense is that this is due to two factors: first, the decline in the relative prominence of history of philosophy compared to other areas of philosophy; second, a shift within history of philosophy toward considering a broader range of traditions and figures, including non-Western traditions and women philosophers.&lt;p&gt;
  
It&#39;s perhaps also interesting that Kant has declined less than the ancient Greeks, overtaking them as the most discussed philosopher.&lt;p&gt;
  
I thought I&#39;d finish today with discussion arcs of some more recent historical figures, who peak at different times:&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhRTiEA6an8HnUjqcc_WdzFjmeYekNQuNvAmL1rSJheMSjLmAwpsDH_XO3KkfveHFpwPf23BoETsniXnwiSjMmUdpUyxJSwmB0ARtSN8Zsg1md6ubzaAPw5V_oiaY72nLyVUlxHJDqiQgFi2eUsMReTmsSIqy8TonKP4hMDVyWjFSsTPvQiuykQxA/s698/5figures-250227.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;400&quot; data-original-height=&quot;611&quot; data-original-width=&quot;698&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhRTiEA6an8HnUjqcc_WdzFjmeYekNQuNvAmL1rSJheMSjLmAwpsDH_XO3KkfveHFpwPf23BoETsniXnwiSjMmUdpUyxJSwmB0ARtSN8Zsg1md6ubzaAPw5V_oiaY72nLyVUlxHJDqiQgFi2eUsMReTmsSIqy8TonKP4hMDVyWjFSsTPvQiuykQxA/s400/5figures-250227.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;

Notice that Russell peaks in the 1940s to 1950s, during which period he is discussed almost as much as Aristotle, Plato, and Kant, who themselves are (as I noted above) discussed in a striking proportion of articles.  There is a little false positive noise for Russell (some other people with &quot;Russell&quot; as a first or last name), but my estimate is that about 90% of hits are accurate.&lt;p&gt;
  
Wittgenstein peaks in the late 1960s, Frege in the early 1980s, and Nietzsche in the early 2000s.  Heidegger&#39;s influence is moderately steady from the late 1960s to the early 2000s, declining modestly in the past couple of decades.&lt;p&gt;
  
Perhaps worth mentioning: These data cover a wide range of journals, regardless of prestige.  In &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2022/06/the-continentalanalytic-divide-is-alive.html&quot;&gt;an analysis a few years ago&lt;/a&gt;, I found that the journals conventionally regarded as prestigious mention prominent &quot;analytic&quot; philosophers such as Frege, Wittgenstein, Quine, Chisholm, and Putnam much more frequently than prominent &quot;Continental&quot; philosophers such as Nietzsche, Heidegger, Sartre, Foucault, and Derrida: The analytic/Continental divide is alive and well.  There might be other interesting trends to discover if we subdivide the universe of articles.&lt;p&gt;
  
In a week or two, I&#39;ll do a follow-up post looking at the discussion arcs of prominent philosophers born 1900-1945, partly to test my &lt;a href=&quot;https://eschwitz.substack.com/p/age-and-philosophical-fame-in-the&quot;&gt;&quot;age and fame&quot; hypothesis&lt;/a&gt;: that philosophers tend to have their peak influence around ages 55-70.&lt;p&gt;
  
--------------------------------------------&lt;p&gt;
[1] The data begin in 1940, so I used a six-year chunk for the first period.  As of Feb 27, data for 2025 are only about two-thirds complete and there are no data for 2026.&lt;p&gt;
  
[2] This is not all articles, especially in the early period, where abstracts are less available.  The idea is to roughly compensate for having fewer abstracts, and thus fewer opportunities for hits in the early data, by comparing the rate of appearance of the target terms with the rate of appearance of several, but not too many, widely used terms across a variety of philosophical topics.&lt;p&gt;
  
  </content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/8186957024734318195/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/8186957024734318195' title='11 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8186957024734318195'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8186957024734318195'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/02/discussion-arcs-for-topics-and.html' title='Discussion Arcs for Topics and Philosophers'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj6lKz3OQ91fPhs_QRMnzcOH3WetOYbJI6kHpl7rxcsoHTwDtF6hZ9Wgez0r-ANG8g09XbmC4L1YNEZIDuFM4bE6HrytaeSgvVl3UcidueC8a1b4xoqkI1ALSIUVAtVKZvx0O065iMd0BYNDYgbZQtwebEcdUIGFK0Os9oScAV8e7lW3dAD6dT9cA/s72-c/OrdinaryLanguage-260227.jpg" height="72" width="72"/><thr:total>11</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-2388212527284943173</id><published>2026-02-19T08:39:00.000-08:00</published><updated>2026-02-19T10:07:36.270-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><title type='text'>Disunity and Indeterminacy in Artificial Consciousness (and Maybe in Human Consciousness Too)</title><content type='html'>Our understanding of the nature of consciousness derives mainly from our understanding of the nature of consciousness in our 
favorite animal (us, of course).  But the features of consciousness in our favorite animal might be specific to that animal rather than universal.&lt;p&gt;
  
Let&#39;s consider two such features and whether we should expect them in conscious AI systems, if conscious AI systems are ever possible.&lt;p&gt;
  
&lt;b&gt;Unity: Our conscious experiences at any given moment are bound together into a single unified experience&lt;/b&gt;, rather than transpiring in separate streams.  If I&#39;m sitting on a wet park bench, I might (a.) visually experience the leafy green trees around me, (b.) tactilely experience the cold dampness soaking into my jeans, and (c.) consciously recall the smaller trees of yesteryear.  Normally -- perhaps necessarily -- three such experiences would not run in disconnected streams.  They would join into a composite experience of (a)-with-(b)-with-(c).  I experience not just trees, cold dampness, and a memory of yesteryear, but all three together as a unified bundle.&lt;p&gt;
  
&lt;b&gt;Determinacy: At any given moment, I am either determinately conscious or determinately nonconscious&lt;/b&gt; (as in anesthesia or dreamless sleep).  Likewise, I &lt;b&gt;either determinately do, or determinately do not, have any particular experience&lt;/b&gt;.  Gray-area cases are at least unusual and maybe impossible.  Even the simplest, barest cases are still determinate.  Consider visual experience: We might imagine the visual field narrowing and losing content until only a gray dot remains -- and then the dot winks out.  That dot, however minimal, is still determinately experienced.  When it winks out, consciousness determinately disappears.  There is no half-winked state between the minimal gray dot and complete absence of visual experience.&lt;p&gt;
  
&lt;b&gt;My thought is that we should not expect unity and determinacy to be general features of conscious AI systems&lt;/b&gt; (if conscious AI is possible).  To see why, let&#39;s start by assuming the &lt;a href=&quot;https://en.wikipedia.org/wiki/Global_workspace_theory&quot;&gt;Global Workspace Theory&lt;/a&gt; of consciousness.  I focus on Global Workspace Theory because it&#39;s probably the leading scientific theory of consciousness and because its standard formulation (&lt;a href=&quot;https://www.college-de-france.fr/en/chair/stanislas-dehaene-experimental-cognitive-psychology-statutory-chair&quot;&gt;Dehaene&lt;/a&gt;&#39;s version) invites the assumption of unity and determinacy.&lt;p&gt;
  
&lt;b&gt;Global Workspace Theory divides the mind into local information processing modules linked by a shared global workspace.&lt;/b&gt;  Information becomes conscious when it is broadcast into the workspace.  Suppose your auditory system registers the faint honk of a distant car horn.  You&#39;re absorbed in reading philosophy and accustomed to ignoring traffic noise, so this representation isn&#39;t selected for further processing.  It&#39;s not a target of attention, not broadcast into the workspace, and not consciously experienced.  (If you think you &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/ExpWOAttn.htm&quot;&gt;constantly consciously experience background sounds&lt;/a&gt;, you can&#39;t hold a standard Global Workspace view.)  Once you attend to the noise, for whatever reason, that information &quot;ignites&quot; into the global workspace, becoming available to a wide variety of &quot;downstream&quot; processes: You can think about it, plan around it, verbally report it, store it in long-term memory, and flexibly combine it with other information in the workspace.  On Global Workspace Theory, being available in this way &lt;i&gt;just is&lt;/i&gt; what it is for the information to be consciously experienced.&lt;p&gt;
  
This model suggests unity and determinacy.  Since there is just one global workspace, and since that workspace enables flexible integration of everything it contains, it makes sense that&lt;b&gt; its various elements will combine into a unified experience&lt;/b&gt;.  And on Dehaene&#39;s version, &lt;b&gt;ignition into the workspace is a sharp-boundaried event&lt;/b&gt;: Information either completely ignites, becoming available for all downstream processes, or it does not.  There is no (or only rarely) partial ignition.  This can explain determinacy.&lt;p&gt;
  
&lt;b&gt;But future AI systems might not share this structure.  They might have multiple or partially overlapping workspaces.&lt;/b&gt;  Different specialized subsystems might have access to different regions of a partly-shared workspace.  Some animals, such as &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/Snails.htm&quot;&gt;snails&lt;/a&gt; and &lt;a href=&quot;https://philpapers.org/rec/CARTOA-11&quot;&gt;octopuses&lt;/a&gt;, distribute processing among multiple ganglia or neural centers that are less tightly coupled than the hemispheres of the human brain.  A robot might broadcast information relevant to locomotion to one area and information relevant to speech to another with limited connectivity.&lt;p&gt;
  
If the subsystems are entirely disconnected, the result might be entirely discrete centers of subjective experience within a single organism or machine.  But if they are partly connected, experience might be only &lt;i&gt;partly&lt;/i&gt; unified.  In the park bench example, the experience of the trees might be unified with the experience of dampness, and the experience of dampness with memories of yesteryear, but the experience of the trees might &lt;i&gt;not&lt;/i&gt; be unified with the memories.  (Unification would not then be a transitive relation.)  Alternatively, some weaker relation of partial unification might hold among the visual, tactile, and memorial experiences.  If this seems inconceivable or impossible, see Sophie Nelson&#39;s and my &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/CountingSubjects.htm&quot;&gt;article&lt;/a&gt; on indeterminate or fractional subjects.&lt;p&gt;
  
More abstractly: There&#39;s no compelling architectural reason why an AI system would have to make information available either to all downstream processes or to none.  A workspace defined in terms of downstream availability could be a patchwork of partial availabilities rather than a fully global all-or-nothing broadcast.&lt;p&gt;
  
&lt;b&gt;For the same reason, ignition into the workspace needn&#39;t be all-or-nothing.&lt;/b&gt;  Between full ignition with determinate consciousness and no ignition with determinate nonconsciousness, there might be in-between, gray-area half-ignitions that are neither determinately conscious nor determinately nonconscious.  Nearly every property with a complex physical or functional basis allows indeterminate, borderline cases: baldness, extraversion, greenness, happiness, whether you&#39;re wearing a shoe, whether a country is a democracy.  The human global workspace might minimize indeterminacy -- like it&#39;s rarely indeterminate in basketball whether the ball has gone through the hoop.  But change the architecture and indeterminacy might become common: a half-hearted ignition, or just enough information-sharing to make it &lt;a href=&quot;https://global.oup.com/academic/product/human-and-animal-minds-9780198843702?cc=us&amp;amp;lang=en&amp;amp;#:~:text=This%20has%20led%20many%20people,no%20scientific%20or%20ethical%20significance.&quot;&gt;indeterminate whether a workspace even exists&lt;/a&gt;.  (If indeterminacy about consciousness strikes you as inconceivable or impossible, see my 2023 article on &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/BorderlineConsciousness.htm&quot;&gt;borderline consciousness&lt;/a&gt;.)&lt;p&gt;
  
Global Workspace Theory might of course be wrong.  &lt;b&gt;But most other theories of consciousness make my argument at least as easy.&lt;/b&gt;  Dennett&#39;s &lt;a href=&quot;http://www.scholarpedia.org/article/Multiple_drafts_model&quot;&gt;fame-in-the-brain&lt;/a&gt; version of broadcast theory explicitly permits disunity and indeterminacy.  Higher Order Theories admit the same fragmentation and, probably, gradualism.  So do biological theories and theories that focus on embodiment.  (Integrated Information Theory is an exception: Its axioms require bright-lined unity and determinacy.  But as &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2014/07/tononis-exclusion-postulate-would-make.html&quot;&gt;I&#39;ve argued&lt;/a&gt;, those bright-line axioms lead to unpalatable consequences.)&lt;p&gt;
  
&lt;b&gt;Recognizing these possibilities for AI systems invites the further thought: Maybe we humans aren&#39;t quite as unified as we normally suppose.&lt;/b&gt; Maybe indeterminate and disunified consciousness is common.  Maybe processes outside of attention hover indeterminately between being conscious and nonconscious.  Maybe some processes are only partly unified.  If it seems otherwise in introspection and memory, maybe that&#39;s because &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/09/the-disunity-of-consciousness-in.html&quot;&gt;introspection and memory tend to &lt;i&gt;impose&lt;/i&gt; unity and determinacy&lt;/a&gt; where none was before.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhdqyZvEQzZS7XnpLLCgjVozmT068ewo8CfpbpdEHjguEbGj8krPj31SgXLGF4TcuCuYUaWXgTiLlArX_YeAg8QUFtFugOV7BDNUid26sEPBLwdlHQLpUWkU0DPR2eJ-bOIfyHPdNnAfbBGIhUvLey04ZeY3De0py9xxtoiPPjjrmQLYN-WPSyAIQ/s1920/paul-klee-kunst-malerei-1667926350gEn.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;1920&quot; data-original-width=&quot;1793&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhdqyZvEQzZS7XnpLLCgjVozmT068ewo8CfpbpdEHjguEbGj8krPj31SgXLGF4TcuCuYUaWXgTiLlArX_YeAg8QUFtFugOV7BDNUid26sEPBLwdlHQLpUWkU0DPR2eJ-bOIfyHPdNnAfbBGIhUvLey04ZeY3De0py9xxtoiPPjjrmQLYN-WPSyAIQ/s320/paul-klee-kunst-malerei-1667926350gEn.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt; [a Paul Klee painting, untitled 1914: &lt;a href=&quot;https://www.publicdomainpictures.net/en/view-image.php?image=473770&amp;amp;picture=paul-klee-art-painting&quot;&gt;source&lt;/a&gt;]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/2388212527284943173/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/2388212527284943173' title='12 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/2388212527284943173'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/2388212527284943173'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/02/disunity-and-indeterminacy-in.html' title='Disunity and Indeterminacy in Artificial Consciousness (and Maybe in Human Consciousness Too)'/><author><name>Eric 
Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhdqyZvEQzZS7XnpLLCgjVozmT068ewo8CfpbpdEHjguEbGj8krPj31SgXLGF4TcuCuYUaWXgTiLlArX_YeAg8QUFtFugOV7BDNUid26sEPBLwdlHQLpUWkU0DPR2eJ-bOIfyHPdNnAfbBGIhUvLey04ZeY3De0py9xxtoiPPjjrmQLYN-WPSyAIQ/s72-c/paul-klee-kunst-malerei-1667926350gEn.jpg" height="72" width="72"/><thr:total>12</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-3903730147481796076</id><published>2026-02-13T11:32:00.000-08:00</published><updated>2026-02-13T12:24:54.099-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="aesthetics"/><category scheme="http://www.blogger.com/atom/ns#" term="axiology"/><title type='text'>The Intrinsic Value of Diversity</title><content type='html'>Moral diversity, &lt;a href=&quot;https://obailey.weebly.com/&quot;&gt;Olivia Bailey&lt;/a&gt; and &lt;a href=&quot;https://objectionable.net/&quot;&gt;Thi Nguyen&lt;/a&gt; say (in a draft paper shared with Myisha Cherry&#39;s &lt;a href=&quot;https://www.emotionandsociety.com/&quot;&gt;Emotion and Society Lab&lt;/a&gt;), is valuable.  It&#39;s good that people have different ethical personalities, opinions, and concerns.  (Within reason: Nazis not welcome.)&lt;p&gt;
  
Why?  Their reasons are instrumental.  Society benefits when people care intensely about different things.  This allows us collectively to achieve a wide range of goals -- curing cancer, helping the homeless, protesting unjust government.  Society also benefits if some people explore the ethical possibility space, developing unusual moral visions, most of which will be mistaken but a few of which might eventually be recognized as genuine moral advances (think of the first slavery abolitionists).  And individuals benefit from the liberty to adopt moral priorities that fit their skills and temperaments: Some people thrive in battle, others in caregiving, others in solitary work.&lt;p&gt;
  
But is moral diversity also &lt;i&gt;intrinsically&lt;/i&gt; valuable -- that is, valuable for its own sake, independent of these good consequences?  I think so.  I think so because &lt;b&gt;diversity in general is intrinsically valuable, and there&#39;s no good reason to treat moral diversity as an exception&lt;/b&gt;.&lt;p&gt;
  
How does one argumentatively establish the intrinsic value of diversity?  The only way I know is to reveal, through thought experiment, that you already implicitly accept it -- and then to ward off objections.&lt;p&gt;
  
Bailey and Nguyen briefly cite Alexander Nehamas on &lt;a href=&quot;https://www.jstor.org/stable/20027806&quot;&gt;diversity of aesthetic opinion&lt;/a&gt;.  Nehamas writes:
  &lt;blockquote&gt;I think a world where everyone liked, or loved, the same things would be a desperate, desolate world -- as devoid of pleasure and interest as the most frightful dystopia of those who believe (quite wrongly) that the popular media are inevitably producing a depressingly, disconsolately uniform world culture.  And although I say this with serious discomfort, a world in which everyone liked Shakespeare, or Titian, or Bach for the same reasons -- if such a world were possible -- appears to me no better than a
 world where everyone tuned in to &lt;i&gt;Baywatch&lt;/i&gt; or listened to the worst pop music at the same time (Nehamas 2002, p. 58-59).&lt;/blockquote&gt;&lt;p&gt;
   
Why is aesthetic diversity valuable, according to Nehamas?  Because style and taste require originality and are bound up with what is distinctive about your life, interests, and sensibility.  Without distinctiveness, style and taste collapse -- an aesthetic disaster.&lt;p&gt;
  
Should we say, then, that diversity, including moral diversity, is valuable &lt;i&gt;aesthetically&lt;/i&gt;?  That its value lies primarily in its beauty, in its capacity to inspire awe, or some other aesthetic feature?  Indeed, diversity is beautiful and awesome (imagine the world without it!) but I don&#39;t think this exhausts its intrinsic value.  Aesthetic value requires a spectator, at least a notional one, whose appreciation is the point.  The intrinsic value of diversity is not, or not primarily, mediated through the hypothetical reaction of an aesthetic spectator.&lt;p&gt;
  
My favorite approach to thinking about intrinsic value is the &lt;a href=&quot;https://aeon.co/essays/if-you-ask-why-youre-a-philosopher-and-youre-awesome&quot;&gt;Distant Planet Thought Experiment&lt;/a&gt;.  Imagine a planet on the far side of the galaxy, blocked from view by the galactic core, a planet we&#39;ll never see or interact with.  What would we hope for on this planet, for its own sake, independent of any potential value for us?&lt;p&gt;
  
Would you hope that it&#39;s a sterile rock, completely devoid of life?  I think not.  If you do think a lifeless rock would be best, I have no argument against you.  For me this is a starting place, a bedrock judgment, which I expect most readers will share.&lt;p&gt;
  
Suppose, then, that you agree a planet with life would be intrinsically better than one without.  Would you hope that its life consists entirely of microbes?  Or would you hope that it teems with diverse life: reefs and rainforests, beetles and bats, squid and bees and ferns and foxes -- or rather, not to duplicate Earth too closely, their alien analogues, translated into a different key?  I think you&#39;ll hope that the planet teems with diverse life.&lt;p&gt;
  
Would you hope that no life on this planet has humanlike behavioral sophistication -- language, long-term planning, complex social coordination?  Would you hope that nothing there could contemplate the meaning of life, the origin of the stars, or its own ancient history?  Would you hope that nothing there could create art, or engage in athletic competition, or invent complex games and tricks and jokes?  I invite you to join me in thinking otherwise.  The planet would be better if it included some beings with that richness of thought and activity.&lt;p&gt;
  
Would you hope for uniformity of intellectual, aesthetic, and ethical opinion -- that everyone shares the same values and ideas?  Or would you hope for diversity?  I think you&#39;ll join me in thinking that the world would be better, better for its own sake, if it were diverse rather than uniform.  Different entities would have different skills, preferences, passions, and ideas.  They&#39;ll fight and disagree (not genocidally, I hope), sometimes value their differences, sometimes dismiss others as completely wrongheaded, sometimes cluster into shared projects, sometimes collaborate across deep disagreement, sometimes be drawn to opposites, sometimes feel kinship with the like-minded, play within and across divides, pursue an enormous variety of projects, explore a vast space of possible forms of life.&lt;p&gt;
  
That is what I hope for on this distant planet -- not for instrumental reasons (not, for example, because it will maximize happiness), and not merely because it would strike a hypothetical spectator as beautiful and awesome (though it should).  Rather, just because it would be valuable for its own sake.  &lt;b&gt;An empty void has little or no value; a rich plurality of forms of existence has immense value, no further justification required.&lt;/b&gt;&lt;p&gt;
  
I have not argued for this.  I have only stated it vividly, hoping that you already accept it.&lt;p&gt;
  
Is ethical opinion an exception?  Should we prefer unity and conformity in ethics, even while welcoming diversity elsewhere?  I think not, for two reasons.&lt;p&gt;
  
First, ethics is open-textured, indeterminate, and full of tragic dilemmas.  Often there is no one decisively best answer on which everyone should converge.  Diversity within &lt;i&gt;at least&lt;/i&gt; the bounds of reasonable disagreement should be permitted.&lt;p&gt;
  
Second, ethical values are inseparable from our other values and ways of life.  A philosophy professor, a civil rights lawyer, a professional athlete, and a farmer will value different things.  There is, I think, no point in attempting to cleanly separate their differing values into distinct types, some of which are permitted to vary and others of which may not.  The ethical, prudential, epistemic, and aesthetic blur together.  These distinctions are not as clean as philosophers often assume.  &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2011/07/mush-of-normativity.html&quot;&gt;Normativity is a mush&lt;/a&gt;.&lt;p&gt;
  
Oh, some of you disagree?  Good!&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhDsfESLKtYGuTOl_QDYqKVVEcLVGjne74cc9RKirKuxMOZ9XUwITlgnxkR3PTQGEQydQcofFGLX8iNmz3aaA6tavgMQEic_MkmX8fupH4jdjG8Ti6eukDUcS9-EW-O2bwc1TNZBtwXIHNRJpGSMEoDlU16q3n_46Y_Yq56vrpCt7pcPJWe-xx1fw/s2850/WeirdnessCover.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;2850&quot; data-original-width=&quot;1875&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhDsfESLKtYGuTOl_QDYqKVVEcLVGjne74cc9RKirKuxMOZ9XUwITlgnxkR3PTQGEQydQcofFGLX8iNmz3aaA6tavgMQEic_MkmX8fupH4jdjG8Ti6eukDUcS9-EW-O2bwc1TNZBtwXIHNRJpGSMEoDlU16q3n_46Y_Yq56vrpCt7pcPJWe-xx1fw/s320/WeirdnessCover.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[the cover of my 2024 book, &lt;a href=&quot;https://press.princeton.edu/books/hardcover/9780691215679/the-weirdness-of-the-world&quot;&gt;&lt;i&gt;The Weirdness of the World&lt;/i&gt;&lt;/a&gt;, hardback version]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/3903730147481796076/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/3903730147481796076' title='6 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3903730147481796076'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3903730147481796076'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/02/the-intrinsic-value-of-diversity.html' title='The Intrinsic Value of Diversity'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhDsfESLKtYGuTOl_QDYqKVVEcLVGjne74cc9RKirKuxMOZ9XUwITlgnxkR3PTQGEQydQcofFGLX8iNmz3aaA6tavgMQEic_MkmX8fupH4jdjG8Ti6eukDUcS9-EW-O2bwc1TNZBtwXIHNRJpGSMEoDlU16q3n_46Y_Yq56vrpCt7pcPJWe-xx1fw/s72-c/WeirdnessCover.jpg" height="72" width="72"/><thr:total>6</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-7126519913303919387</id><published>2026-02-05T09:03:00.000-08:00</published><updated>2026-02-05T09:03:54.660-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><title type='text'>Artificial Intelligence as Strange Intelligence: Against Linear Models of Intelligence (New Paper in Draft)</title><content 
type='html'>&lt;b&gt;by Kendra Chilson and Eric Schwitzgebel&lt;/b&gt;&lt;p&gt;

&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEguGK1D6JD1dEKVoK28CWA7E22myZorJJUEnyw44adrBOkjh7nAOm8fZhyphenhyphenchGuynCvt9Vil3npi4YejDx0XmmKm8U5Sl5p_LBrNLYL4RD0zRBJccM16ZtPO44r27K91VV-w50FoCoZbmaPvw1emdax24aOFSSXFyysR_F7_xxWl0bd5ZAWSvcUWFA/s606/StrangeIntelligence.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;566&quot; data-original-width=&quot;606&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEguGK1D6JD1dEKVoK28CWA7E22myZorJJUEnyw44adrBOkjh7nAOm8fZhyphenhyphenchGuynCvt9Vil3npi4YejDx0XmmKm8U5Sl5p_LBrNLYL4RD0zRBJccM16ZtPO44r27K91VV-w50FoCoZbmaPvw1emdax24aOFSSXFyysR_F7_xxWl0bd5ZAWSvcUWFA/s320/StrangeIntelligence.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;

Our main idea, condensed to 1000 words:&lt;p&gt;

On a &lt;i&gt;linear model of intelligence&lt;/i&gt;, entities can be roughly linearly ordered in overall intelligence: frogs are smarter than nematodes, cats smarter than frogs, apes smarter than cats, and humans smarter than apes.  This same linear model is often assumed when discussing AI systems.  &quot;Narrow AI&quot; systems (like chess machines and autonomous vehicles) are assumed to be subhuman in intelligence, at some point -- maybe soon -- AI systems will have approximately human-level intelligence, and in the future we might expect superintelligent AI that exceeds our intellectual capacity in virtually all domains of interest.&lt;p&gt;
  
Building on the work of &lt;a href=&quot;https://philpapers.org/rec/SCHFLT-9&quot;&gt;Susan Schneider&lt;/a&gt;, we challenge this linear model of intelligence.  Central to our project is the concept of general intelligence as &lt;i&gt;the ability to use information to achieve a wide range of goals in a wide variety of environments&lt;/i&gt;.&lt;p&gt;
  
Of course even the simplest entity capable of using information to achieve goals can succeed in some environments, and no finite entity could succeed in all possible goals in all possible environments.  &quot;General intelligence&quot; is therefore a matter of degree.  Moreover, general intelligence is a massively multidimensional matter of degree: There are many many possible goals and many many possible environments and no non-arbitrary way to taxonomize and weight all these goals and environments into a single linear scale or definitive threshold.&lt;p&gt;
  
Every entity is in important respects narrow: Humans also can achieve their goals in only a very limited range of environments.  Interstellar space, the deep sea, the Earth&#39;s crust, the middle of the sky, the center of a star -- transposition to any of these places will quickly defeat almost all our plans.  We depend for our successful functioning on a very specific context.  So of course do all animals and all AI systems.&lt;p&gt;
  
Similarly, although humans are good at a certain range of tasks, we cannot detect electrical fields in the water, dodge softballs while hovering in place, communicate with dolphins by echolocation, or calculate a hundred digits of pi in our heads.  If we put a server with a language model in the desert without a power source or if we place an autonomous vehicle in a chess tournament and then interpret their incompetence as a lack of general intelligence, we risk being as unfair to them as a dolphin would be if it blamed us for our poor skills in its environment.  Yes, there&#39;s a perfectly reasonable sense in which chess machines and autonomous vehicles have much more limited capacities than do humans.  They are narrow in their abilities compared to us by almost any plausible metric of narrowness.  But it is anthropocentric to insist that general intelligence requires generally successful performance on the tasks and in the environments that we humans tend to favor, given that those tasks and environments are such a small subset of the possible tasks and environments an entity could face.  And any attempt to escape anthropocentrism by creating an unbiased and properly weighted taxonomy of task types and environments is either hopeless or liable to generate a variety of very different but equally plausible arbitrary composites.&lt;p&gt;
  
AI systems, like nonhuman animals and neuroatypical people, can combine skills and deficits in patterns that are unfamiliar to those who have attended mostly to typical human cases.  AI systems are highly unlikely to replicate every human capacity, due to limits in data and optimization, as well as a fundamentally different underlying architecture.  They struggle to do many things that ordinary humans do effortlessly, such as reliably interpreting everyday visual scenes and performing feats of manual dexterity.  But the reverse is also true: Humans cannot perform some feats that machines perform in a fraction of a second.  If we think of intelligence as irreducibly multidimensional instead of linear -- as always relativized to the immense number of possible goals and environments -- we can avoid the temptation to try to reach a scalar judgment about which type of entity is actually smarter and by how much.&lt;p&gt;
  
We might think of typical human intelligence as &quot;familiar intelligence&quot; -- familiar to us, that is -- and artificial intelligence as &quot;strange intelligence&quot;.  This terminology wears its anthropocentrism on its sleeve, rather than masking it under false objectivity.  Something possesses familiar intelligence to the degree it thinks like us.  It is a similarity relation.  How familiar an intelligence is depends on several factors.  Some are architectural: What forms does the basic cognitive processing take?  What shortcuts and heuristics does it rely on?  How serial or parallel is it?  How fast?  With what sorts of redundancy, modularity, and self-monitoring for errors?  Others are learned and cultural: learned habits, particular cultural practices, acquired skills, chosen effort based on perceived costs and benefits.  An intelligence is &lt;i&gt;outwardly familiar&lt;/i&gt; if it acts like us in intelligence-based tasks.  And it is &lt;i&gt;inwardly familiar&lt;/i&gt; if it does so by the same underlying cognitive mechanisms.&lt;p&gt;
  
Familiarity is also a matter of degree: The intelligence of dogs is more familiar to us (in most respects) than that of octopuses.  Although we share some common features with octopuses, they evolved in a very different environment and have very dissimilar cognitive architecture as a result.  It&#39;s hard for us even to understand their goals, because their existence is so different.  Still, as distant as our minds are from those of octopuses, we share with octopuses the broadly familiar lifeways of embodied animals who need to navigate the natural world, find food, and mate.&lt;p&gt;
  
AI constitutes an even stranger form of intelligence.  With architectures, environments, and goals so fundamentally unlike ours, AI is the strangest intelligence we have yet encountered.  AI is not a biological organism; it was not shaped by the evolutionary pressures shared by every living being on Earth, and it does not have the same underlying needs.  It is based on an inorganic substrate totally unlike all biological neurophysiology.  Its goals are imposed by its makers rather than being autopoietic.  Such intelligence should be expected to behave in ways radically different from familiar minds.  This raises an epistemic challenge: Understanding and measuring strange intelligence may be extremely difficult for us.  Plausibly, the stranger an intelligence is from our perspective, the easier it is for us to fail to appreciate what it&#39;s up to.  Strange intelligences rely on methods alien to our cognition.&lt;p&gt;

If intelligence were linear and one-dimensional, then a single example of an egregious mistake by an AI -- a mistake a human would never make, like confusing a strawberry for a toy poodle -- would be enough to show that the systems are nowhere near our level of intelligence.  However, since intelligence is massively multidimensional, all that these cases show on their own is that these systems have certain lacunae or blind spots.  Of course, we humans also have lacunae and blind spots – just consider optical illusions.  Our susceptibility to optical illusions is not used as evidence of our lack of general intelligence, however ridiculous our mistakes might seem to any entity not subject to those same illusions.&lt;p&gt;
  
Full draft &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/StrangeIntelligence.htm&quot;&gt;here&lt;/a&gt;.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/7126519913303919387/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/7126519913303919387' title='3 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7126519913303919387'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7126519913303919387'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/02/artificial-intelligence-as-strange.html' title='Artificial Intelligence as Strange Intelligence: Against Linear Models of Intelligence (New Paper in Draft)'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEguGK1D6JD1dEKVoK28CWA7E22myZorJJUEnyw44adrBOkjh7nAOm8fZhyphenhyphenchGuynCvt9Vil3npi4YejDx0XmmKm8U5Sl5p_LBrNLYL4RD0zRBJccM16ZtPO44r27K91VV-w50FoCoZbmaPvw1emdax24aOFSSXFyysR_F7_xxWl0bd5ZAWSvcUWFA/s72-c/StrangeIntelligence.jpg" height="72" width="72"/><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-5787231918383386992</id><published>2026-01-30T09:24:00.000-08:00</published><updated>2026-01-30T09:29:46.616-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><title type='text'>Does Global Workspace Theory Solve the Question of AI 
Consciousness?</title><content type='html'>Hint: no.&lt;p&gt;
  
Below are three sections from Chapter Eight of my manuscript in draft, &lt;i&gt;AI and Consciousness&lt;/i&gt;, fresh new version available today &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIConsciousness.htm&quot;&gt;here&lt;/a&gt;.  Comments welcome!&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhFYamyf8JaJfQvBNRUokBSsYa6lvv64xMSj68_zwGapeJLeZ14JBeHMY69lYdRjW2slBRMiI9uBG0Xrt4BREAmUwj5WxMJBIkvvUqMvTo-LXrBugZ2s2hH_WwXEH8ajkJpJbdFHCUUQ2Bb00FiCblVFLvQZbDPXR3XjmqION5qwkp4iAwGoJHxZg/s559/RobotWorkspace.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;559&quot; data-original-width=&quot;396&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhFYamyf8JaJfQvBNRUokBSsYa6lvv64xMSj68_zwGapeJLeZ14JBeHMY69lYdRjW2slBRMiI9uBG0Xrt4BREAmUwj5WxMJBIkvvUqMvTo-LXrBugZ2s2hH_WwXEH8ajkJpJbdFHCUUQ2Bb00FiCblVFLvQZbDPXR3XjmqION5qwkp4iAwGoJHxZg/s320/RobotWorkspace.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[image adapted from &lt;a href=&quot;https://link.springer.com/chapter/10.1007/978-3-642-18015-6_4&quot;&gt;Dehaene et al. 2011&lt;/a&gt;]&lt;p&gt;&lt;br/&gt;
  
&lt;i&gt;1. Global Workspace Theories and Access.&lt;/i&gt;&lt;p&gt;
  
The core idea of Global Workspace Theory is simple.  Sophisticated cognitive systems like the human mind employ specialized processes that operate to a substantial extent in isolation.  We can call these &lt;i&gt;modules&lt;/i&gt;, without committing to any strict interpretation of that term.[1]  For example, when you hear speech in a familiar language, some cognitive process converts the incoming auditory stimulus into recognizable speech.  When you type on a keyboard, motor functions convert your intention to type a word like “consciousness” into nerve signals that guide your fingers.  When you try to recall ancient Chinese philosophers, some cognitive process pulls that information from memory without (amazingly) clogging your consciousness with irrelevant information about German philosophers, British prime ministers, rock bands, or dog breeds.&lt;p&gt;
  
Of course, not all processes are isolated.  Some information is widely shared, influencing or available to influence many other processes.  Once I recall the name “Zhuangzi”, the thought “Zhuangzi was an ancient Chinese philosopher” cascades downstream.  I might say it aloud, type it out, use it as a premise in an inference, form a visual image of Zhuangzi, contemplate his main ideas, attempt to sear it into memory for an exam, or use it as a clue to decipher a handwritten note.  To say that some information is in “the global workspace” just is to say that it is available to influence a wide range of cognitive processes.  According to Global Workspace Theory, a representation, thought, or cognitive process is conscious if and only if it is in the global workspace – if it is “widely broadcast to other processors in the brain”, allowing integration both in the moment and over time.[2]&lt;p&gt;
  
Recall the ten possibly essential features of consciousness from &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIConsciousness.htm&quot;&gt;Chapter Three&lt;/a&gt;: luminosity, subjectivity, unity, access, intentionality, flexible integration, determinacy, wonderfulness, specious presence, and privacy.  [Blog readers: You won&#39;t have read Chapter Three, but try to ride with it anyway.]  Global Workspace Theory treats &lt;i&gt;access&lt;/i&gt; as the central essential feature.&lt;p&gt;
  
Global Workspace theory can potentially explain other possibly essential features.  &lt;i&gt;Luminosity&lt;/i&gt; follows if processes or representations in the workspace are available for introspective processes of self-report.  &lt;i&gt;Unity&lt;/i&gt; might follow if there’s only one workspace, so that everything in it is present together.  &lt;i&gt;Determinacy&lt;/i&gt; might follow if there’s a bright line between being in the workspace and not being in it.  &lt;i&gt;Flexible integration&lt;/i&gt; might follow if the workspace functions to flexibly combine representations or processes from across the mind.  &lt;i&gt;Privacy&lt;/i&gt; follows if only you can have direct access to the contents of your workspace.  &lt;i&gt;Specious presence&lt;/i&gt; might follow if representations or processes generally occupy the workspace for some hundreds of milliseconds.&lt;p&gt;
  
In ordinary adult humans, typical examples of conscious experience – your visual experience of this text, your emotional experience of fear in a dangerous situation, your silent inner speech, your conscious visual imagery, your felt pains – appear to have the broad cognitive influences Global Workspace Theory describes.  It’s not as though we commonly experience pain but find that we can’t report it or act on its basis, or that we experience a visual image of a giraffe but can’t engage in further thinking about the content of that image.  Such general facts, plus the theory’s potential to explain features such as luminosity, unity, determinacy, flexible integration, privacy, and specious presence, lend Global Workspace Theories substantial initial attractiveness.&lt;p&gt;
  
I have treated Global Workspace Theory as if it were a single theory, but it encompasses a family of theories that differ in detail, including “broadcast” and “fame” theories – any theory that treats the broad accessibility of a representation, thought, or process as the central essential feature making it conscious.[3]&lt;p&gt;
  
Consider two contrasting views: Dehaene’s Global Neuronal Workspace Theory and Daniel Dennett’s “fame in the brain” view.  Dehaene holds that entry into the workspace is all-or-nothing.  Once a process “ignites” into the workspace, it does so completely.  Every representation or process either stops short of entering consciousness or is broadcast to all available downstream processes.  Dennett’s fame view, in contrast, admits degrees.  Representations or processes might be more or less famous, available to influence some downstream cognitive processes without being available to influence others.  There is no one workspace, but a pandemonium of competing processes.[4]  If Dennett is correct, luminosity, determinacy, unity, and flexible integration all potentially come under threat in a way they do not as obviously come under threat on Dehaene’s view.[5]&lt;p&gt;
  
Dennettian concerns notwithstanding, all-or-nothing ignition into a single, unified workspace is currently the dominant version of Global Workspace Theory.  The issue remains unsettled and has obvious implications for the types of architectures that might plausibly host AI consciousness.&lt;p&gt;

&lt;i&gt;2. Consciousness Outside the Workspace; Nonconsciousness Within It?&lt;/i&gt;&lt;p&gt;
  
Global Workspace Theory is not the correct theory of consciousness unless all and only thoughts, representations, or processes in the Global Workspace are conscious.  Otherwise, something else, or something additional, is necessary for consciousness.&lt;p&gt;
  
It is not clear that even in ordinary adult humans a process must be in the Global Workspace to be conscious.  Consider the case of peripheral experience.  Some theorists maintain that people have rich sensory experiences outside of focal attention: a constant background experience of your feet in your shoes and objects in the visual periphery.[6]  Others – including Global Workspace theorists – dispute this.  Introspective reports vary, and resolving such issues is methodologically tricky.&lt;p&gt;
  
One methodological problem: People who report constant peripheral experiences might mistakenly assume that such experiences are always present because they are always present &lt;i&gt;whenever they think to check&lt;/i&gt;, and the very act of checking might generate those experiences.  This is sometimes called the “refrigerator light illusion”, akin to the error of thinking the refrigerator light is always on because it’s always on when you open the door to check.[7]  On this view, you’re only tempted to think you have constant tactile experience of your feet in your shoes because you have that experience on those rare occasions when you’re thinking about whether you have it.  Even if you now seem to have a broad range of experiences in different sensory modalities simultaneously, this could result from an unusual act of dispersed attention, or from “gist” perception or “ensemble” perception, in which you are conscious of the general gist or general features of a scene, knowing that there are details, without actually experiencing those unattended details.[8]&lt;p&gt;
  
The opposite mistake is also possible.  Those who deny a constant stream of peripheral experiences might simply be failing to notice or remember them.  The fact that you don’t remember now the sensation of your feet in your shoes two minutes ago hardly establishes that you lacked the sensation at the time.  Although many people find it introspectively compelling that their experience is rich with detail or that it is not, the issue is methodologically complex because introspection and memory are not independent of the phenomena to be observed.[9]&lt;p&gt;
    
If we do have rich sensory experience outside of attention, it is unlikely that all of that experience is present in or broadcast to a Global Workspace.  Unattended peripheral information is rarely remembered or consciously acted upon, tending to exert limited downstream influence – the paradigm of information that is not widely broadcast.  Moreover, the Global Workspace is generally characterized as limited capacity, containing only a few thoughts, representations, objects, or processes at a time – those that survive some competition or attentional selection – not a welter of richly detailed experiences in many modalities at once.[10]&lt;p&gt;
  
A less common but equally important objection runs in the opposite direction: Perhaps not everything in the Global Workspace is conscious.  Some thoughts, representations, or processes might be widely broadcast, shaping diverse processes, without ever reaching explicit awareness.[11]  Implicit racist assumptions, for example, might influence your mood, actions, facial expressions, and verbal expressions.  The goal of impressing your colleagues during a talk might have pervasive downstream effects without occupying your conscious experience moment to moment.&lt;p&gt;
  
The Global Workspace theorist who wants to allow that such processes are not conscious might suggest that, at least for adult humans, processes in the workspace are generally also available for introspection.   But there’s substantial empirical risk in this move.  If the correlation between introspective access and availability for other types of downstream cognition isn’t excellent, the Global Workspace theorist faces a dilemma.  Either allow many conscious but nonintrospectable processes, violating widespread assumptions about luminosity, or redefine the workspace in terms of introspectability, which amounts to shifting to a Higher Order view.&lt;p&gt;

&lt;i&gt;3. Generalizing Beyond Vertebrates.&lt;/i&gt;&lt;p&gt;
  
The empirical questions are difficult even in ordinary adult humans.  But our topic isn’t ordinary adult humans – it’s AI systems.  For Global Workspace Theory to deliver the right answers about AI consciousness, it must be a &lt;i&gt;universal&lt;/i&gt; theory applicable everywhere, not just a theory of how consciousness works in adult humans, vertebrates, or even all animals.&lt;p&gt;
  
If there were a sound &lt;i&gt;conceptual&lt;/i&gt; argument for Global Workspace Theory, then we could know the theory to be universally true of all conscious entities.  Empirical evidence would be unnecessary.  It would be as inevitably true as that rectangles have four sides.  But as I argued in &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIConsciousness.htm&quot;&gt;Chapter Four&lt;/a&gt;, conceptual arguments for the essentiality of any of the ten possibly essential features are unlikely to succeed – and a conceptual argument for Global Workspace Theory would be tantamount to a conceptual argument for the essentiality of access, one of those ten features.  Not only do the general observations of Chapter Four suggest against a conceptual guarantee, so also does the apparent conceivability, as described in Section 2 above, of consciousness outside the workspace or nonconsciousness within it – even if such claims are empirically false.&lt;p&gt;
  
If Global Workspace Theory is the correct universal theory of consciousness applying to all possible entities, an empirical argument must establish that fact.  But it’s hard to see how such an empirical argument could proceed.  We face another version of the Problem of the Narrow Evidence Base.  Even if we establish that in ordinary humans, or even in all vertebrates, a thought, representation, or process is conscious if and only if it occupies a Global Workspace, what besides a conceptual argument would justify treating this as a universal truth that holds among all possible conscious systems?&lt;p&gt;
  
Consider some alternative architectures.  The cognitive processes and neural systems of octopuses, for example, are distributed across their bodies, often operating substantially independently rather than reliably converging into a shared center.[12]   AI systems certainly can be, indeed often are, similarly decentralized.  Imagine coupling such disunity with the capacity for self-report – an animal or AI system with processes that are reportable but poorly integrated with other processes.  If we assume Global Workspace Theory at the outset, we can conclude that only sufficiently integrated processes are conscious.  But if we don’t assume Global Workspace Theory at the outset, it’s difficult to imagine what near-future evidence could establish that fact beyond a reasonable standard of doubt to a researcher who is initially drawn to a different theory.&lt;p&gt;
  
If the simplest version of Global Workspace Theory is correct, we can easily create a conscious machine.  This is what Dehaene and collaborators envision in the 2017 paper I discussed in Chapter One.  Simply create a machine – such as an autonomous vehicle – with several input modules, several output modules, a memory store, and a central hub for access and integration across the modules.  Consciousness follows.  If this seems doubtful to you, then you cannot straightforwardly accept the simplest version of Global Workspace Theory.[13]&lt;p&gt;
  
We can apply Global Workspace Theory to settle the question of AI consciousness only if we know the theory to be true either on conceptual grounds or because it is empirically well established as the correct universal theory of consciousness applicable to all types of entity.  Despite the substantial appeal of Global Workspace Theory, we cannot know it to be true by either route.&lt;p&gt;
  
-------------------------------------&lt;p&gt;
  
[1] Full Fodorian (1983) modularity is not required.&lt;p&gt;
  
[2] Mashour et al. 2020, pp. 776-777.&lt;p&gt;
  
[3] E.g., Baars 1988; Dennett 1991, 2005; Tye 2000; Prinz 2012; Dehaene 2014; Mashour et al. 2020.&lt;p&gt;
  
[4] Whether Dennett’s view is more plausible than Dehaene’s turns on whether, or how commonly, representations or processes are &lt;i&gt;partly&lt;/i&gt; famous.  Some visual illusions, for example, seem to affect verbal report but not grip aperture: We &lt;i&gt;say&lt;/i&gt; that X looks smaller than Y, but when we &lt;i&gt;reach&lt;/i&gt; toward X and Y we open our fingers to the same extent, accurately reflecting that X and Y are the same size.  The fingers sometimes know what the mouth does not. (Aglioti et al. 1995; Smeets et al. 2020).  We adjust our posture while walking and standing in response to many sources of information that are not fully reportable, suggesting wide integration but not full accessibility (Peterka 2018; Shanbhag 2023).  Swift, skillful activity in sports, in handling tools, and in understanding jokes also appears to require integrating diverse sources of information, which might not be &lt;i&gt;fully&lt;/i&gt; integrated or reportable (Christensen et al. 2019; Vauclin et al. 2023; Horgan and Potrč 2010).  In response, the all-or-nothing “ignition” view can explain away such cases of seeming intermediacy or disunity as atypical (it needn’t commit to 100% exceptionless ignition with no gray-area cases), by allowing some nonconscious communication among modules (which needn’t be entirely informationally isolated), and/or by allowing for erroneous or incomplete introspective report (maybe some conscious experiences are too brief, complex, or subtle for people to confidently report experiencing them).&lt;p&gt;
  
[5] Despite developing a theory of consciousness, Dennett (2016) endorsed “illusionism”, which rejects the reality of phenomenal consciousness (see especially Frankish 2016).  I interpret the dispute between illusionists and nonillusionists as a verbal dispute about whether the specific philosophical concept of “phenomenal consciousness” requires immateriality, irreducibility, perfect introspectibility, or some other dubious property, or whether the term can be “innocently” used without invoking such dubious properties.  See Schwitzgebel 2016, 2025.&lt;p&gt;
  
[6] Reviewed in Schwitzgebel 2011, ch. 6; and though limited only to stimuli near the center of the visual field, see the large literature on “overflow” in response to Block 2007.&lt;p&gt;
  
[7] Thomas 1999.&lt;p&gt;
  
[8] Oliva and Torralba 2006; Whitney and Leib 2018.&lt;p&gt;
  
[9] Schwitzgebel 2007 explores the methodological challenges in detail.&lt;p&gt;
  
[10] E.g., Dehaene 2014; Mashour et al. 2020.&lt;p&gt;
  
[11] E.g., Searle 1983, ch. 5; Bargh and Morsella 2008; Lau 2022; Michel et al. 2025; see also note 4.&lt;p&gt;
  
[12] Godfrey-Smith 2016; Carls-Diamante 2022.&lt;p&gt;
  
[13] See also Goldstein and Kirk-Giannini (forthcoming) for an extended application of Global Workspace Theory to AI consciousness.  One might alternatively read Dehaene, Lau, and Kouider 2017 purely as a conceptual argument: If all we mean by “conscious” is “accessible in a Global Workspace”, then building a system of this sort suffices for building a conscious entity.  The difficulty then arises in moving from that stipulative conceptual claim to the interesting, substantive claim about phenomenal consciousness in the standard sense described in Chapter Two.  Similar remarks apply to the Higher Order aspect of that article.  One challenge for this deflationary interpretation is that in related works (Dehaene 2014; Lau 2022) the authors treat their accounts as accounts of phenomenal consciousness.  The article concludes by emphasizing that in humans “subjective experience coheres with possession” of the functional features they identify.  A further complication: Lau later says that the way he expressed his view in this 2017 article was “unsatisfactory”: Lau 2022, p. 
168.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/5787231918383386992/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/5787231918383386992' title='4 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/5787231918383386992'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/5787231918383386992'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/01/does-global-workspace-theory-solve.html' title='Does Global Workspace Theory Solve the Question of AI Consciousness?'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhFYamyf8JaJfQvBNRUokBSsYa6lvv64xMSj68_zwGapeJLeZ14JBeHMY69lYdRjW2slBRMiI9uBG0Xrt4BREAmUwj5WxMJBIkvvUqMvTo-LXrBugZ2s2hH_WwXEH8ajkJpJbdFHCUUQ2Bb00FiCblVFLvQZbDPXR3XjmqION5qwkp4iAwGoJHxZg/s72-c/RobotWorkspace.jpg" height="72" width="72"/><thr:total>4</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-3866622817631716288</id><published>2026-01-23T09:25:00.000-08:00</published><updated>2026-01-23T09:47:35.530-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><title type='text'>Is Signal Strength a Confound in Consciousness Research?</title><content type='html'>&lt;a href=&quot;https://philosophy.mit.edu/mmichel/&quot;&gt;Matthias Michel&lt;/a&gt; is among the sharpest critics of the methods of consciousness science.  
His forthcoming paper, &quot;&lt;a href=&quot;https://philarchive.org/rec/MICCDD&quot;&gt;Consciousness Doesn&#39;t Do That&lt;/a&gt;&quot;, convincingly challenges background assumptions behind recent efforts to discover the causes, correlates, and prevalence of consciousness.  It should be required reading for anyone tempted to argue, for example, that &lt;a href=&quot;https://dictionary.apa.org/trace-conditioning&quot;&gt;trace conditioning&lt;/a&gt; correlates with consciousness in humans and thus that nonhuman animals capable of trace conditioning must also be conscious.&lt;p&gt;
  
But Michel does make one claim that bugs me, and that claim is central to the article.  And &lt;a href=&quot;https://docs.google.com/document/d/e/2PACX-1vTR5oCNhCA8AM-EVW0YN2SwiR1SgAxwbcyFYGACrk-VxcpfAQomnWYWTD7tUW3HlfiSePw0N-6JrLxo/pub&quot;&gt;Hakwan Lau&lt;/a&gt; -- another otherwise terrific methodologist -- makes a similar claim in his 2022 book &lt;a href=&quot;https://global.oup.com/academic/product/in-consciousness-we-trust-9780198856771&quot;&gt;&lt;i&gt;In Consciousness We Trust&lt;/i&gt;&lt;/a&gt;, and again the claim is central to the argument of that book.  So today I&#39;m going to poke at that claim, and maybe it will burst like a sour blueberry.&lt;p&gt;
  
The claim: &lt;b&gt;Signal strength (performance capacity, in Lau&#39;s version) is a confound in consciousness research.&lt;/b&gt;&lt;p&gt;
  
As Michel uses the phrase, &quot;signal strength&quot; is how discriminable a perceptible feature is to a subject.  A sudden, loud blast of noise has high signal strength.  It&#39;s very easy to notice.  A faint wavy pattern in a gray field, presented for a tenth of a second, has low signal strength.  It is easy to miss.  Importantly, signal strength is not the same as (objective, externally measurable) stimulus intensity, but reflects how well &lt;i&gt;the perceiver responds&lt;/i&gt; to the signal.&lt;p&gt;
  
&lt;b&gt;Signal strength clearly &lt;i&gt;correlates&lt;/i&gt; with consciousness.&lt;/b&gt;  You&#39;re much more likely to be conscious of stimuli that you find easy to discriminate than stimuli that you find difficult to discriminate.  The loud blare is consciously experienced.  The faint wavy pattern might or might not be.  A stimulus with effectively zero signal strength -- say, a gray dot flashed for a millionth of a second and immediately masked -- will normally not be experienced at all.&lt;p&gt;
  
&lt;b&gt;But signal strength is not the &lt;i&gt;same&lt;/i&gt; as consciousness.&lt;/b&gt;  The two can come apart.  The classic example is &lt;a href=&quot;https://en.wikipedia.org/wiki/Blindsight&quot;&gt;blindsight&lt;/a&gt;.  On the standard interpretation (but see &lt;a href=&quot;https://perception.jhu.edu/files/PDFs/Misc/Phillips_Blindsight_PsychRev.pdf&quot;&gt;Phillips 2020&lt;/a&gt; for an alternative), patients with a specific type of visual cortex damage can discriminate stimuli that they cannot consciously perceive.  Flash either an &quot;X&quot; or an &quot;O&quot; in the blind part of their visual field and they will say they have no visual experience of it.  But ask them to guess which letter was shown and their performance is well above chance -- up to 90% correct in some tasks.  The &quot;X&quot; has some signal strength for them: It&#39;s discriminable but not consciously experienced.&lt;p&gt;
  
If signal strength is not consciousness but often correlates with it, the following worry arises.  When a researcher claims that &quot;trace conditioning is only possible for conscious stimuli&quot; or &quot;consciousness facilitates episodic memory&quot;, how do you know that it&#39;s really &lt;i&gt;consciousness&lt;/i&gt; doing the work, rather than signal strength?  Maybe stimuli with high signal strength are both more likely to be consciously experienced and more likely to enable trace conditioning and episodic memory.  Unless researchers have carefully separated the two, the causal role of consciousness remains unclear.&lt;p&gt;
  
&lt;b&gt;An understandable methodological response is to try to control for signal strength:&lt;/b&gt; Present stimuli of similar discriminability to the subject but which differ in whether (or to what extent) they are consciously experienced.  Only then, the reasoning goes, can differences in downstream effects be confidently attributed to consciousness itself rather than differences in signal strength.  Lau in particular stresses the importance of such controls.  Yet such careful matching is difficult and rarely attempted.  On this reasoning, much of the literature on the cognitive role of consciousness is built on sand, not clearly distinguishing the effects of consciousness from the effects of signal strength.&lt;p&gt;
  
This reasoning is attractive but faces an obvious objection, which both Michel and Lau address directly.  &lt;b&gt;What if signal strength &lt;i&gt;just is&lt;/i&gt; consciousness?&lt;/b&gt;  Then trying to &quot;control&quot; for it would erase the phenomenon of interest.&lt;p&gt;
  
Both Michel and Lau analogize to height and bone length.  If you want to test whether height confers an advantage in basketball or dating, you might want to control for skin color, but it would be absurd to control for &lt;i&gt;bone length&lt;/i&gt;.  If skin color correlates with height and you want to see whether height specifically advantages people in basketball or dating, it makes sense to control for differences in skin color by systematically comparing people with the same skin color but different heights.  If the advantage persists, you can infer that height rather than skin color is doing the work.  But trying to control for bone length lands you in nonsense.  Taller people just &lt;i&gt;are&lt;/i&gt; the people with longer bones.&lt;p&gt;
  
&lt;b&gt;Michel and Lau respond by noting that consciousness and signal strength (or performance capacity) sometimes dissociate&lt;/b&gt;, as in blindsight.  Therefore, they are not the same thing and it &lt;i&gt;does&lt;/i&gt; make sense to control for one in exploring the effects of the other.&lt;p&gt;
  
But this response is too simple and too fast.&lt;p&gt;
  
We can see this even in their chosen example.  Height and bone length &lt;i&gt;aren&#39;t&lt;/i&gt; quite the same thing.  They can dissociate.  People are about 1-2 cm taller in the morning than at night -- not because their bones have grown but because the tissue between the bones (especially in the spine) compresses during the day.&lt;p&gt;
  
Now imagine an argument parallel to Michel&#39;s and Lau&#39;s: Since height and bone length can come apart, we  should try to control for bone length in examining the effects of height on basketball and dating.  We then compare the same people&#39;s basketball and dating outcomes in the morning and at night, &quot;holding bone length fixed&quot; while height varies slightly.  This would be a methodological mistake.  For one thing, we&#39;ve introduced a new potential confound, time of day.  For another, even if the centimeter in the morning really does help a little, we&#39;ve dramatically reduced our ability to detect the real effect of height by &quot;overcontrolling&quot; for a component of the target variable, height.&lt;p&gt;

Consider a psychological example.  The personality trait of extraversion can be broken into &quot;&lt;a href=&quot;https://psycnet.apa.org/record/2016-17156-001?doi=1&quot;&gt;facets&lt;/a&gt;&quot;, such as sociability, assertiveness, and energy level.  Since energy level is only one aspect of extraversion, the two can dissociate.  Some people are energetic but not sociable or assertive; others are sociable and assertive but low-energy.  If you wanted to measure the influence of extraversion on, say, judgments of likeability in the workplace, you wouldn&#39;t want to control for energy level.  That would be overcontrol, like controlling for bone length in attempting to assess the effects of height.  It would strip away part of the construct you are trying to measure.&lt;p&gt;
  
What I hope these examples make clear is that &lt;b&gt;dissociability between correlates A and B does not automatically make B a confound that must be controlled when studying A&#39;s effects&lt;/b&gt;. Bone length is dissociable from height, but it is a component, not a confound.  Energy level is dissociable from extraversion, but it is a component, not a confound.&lt;p&gt;
  
&lt;b&gt;The real question, then, is whether signal strength (or performance capacity) is better viewed as a component or facet of consciousness than as a separate variable that needs to be held constant in testing the effects of consciousness.&lt;/b&gt;&lt;p&gt;
  
&lt;b&gt;A case can be made that it is. &lt;/b&gt;Consider &lt;a href=&quot;https://en.wikipedia.org/wiki/Global_workspace_theory&quot;&gt;Global Workspace Theory&lt;/a&gt;, one of the leading theories of consciousness.  On this view, a process or representation is conscious if it is broadly available for &quot;downstream cognition&quot; such as verbal report, long-term memory, and rational planning.  If discrimination judgments are among those downstream capacities, then one facet of being in the global workspace (that is, on this view, being conscious) is enabling such judgments.  But recall that signal strength just is discriminability for a subject.  If so, things begin to look like the extraversion / energy case.  Controlling for discriminability would be overcontrolling, that is, attempting to equalize or cancel the effects not of a separate, confounding process, but of a component of the target process itself.  (Similar remarks hold for Lau&#39;s &quot;performance capacity&quot;.)&lt;p&gt;
  
Global Workspace Theory might not be correct.  And if it&#39;s not, maybe signal strength is indeed a confounder, rather than a component of consciousness.  But the case for treating signal strength as a confounder can&#39;t be established simply by noticing the possibility of dissociations between consciousness and signal strength.  Furthermore, since Michel&#39;s and Lau&#39;s recommended methodology can be trusted not to suffer from overcontrol bias only if Global Workspace Theory is false, it&#39;s circular to rely on that methodology to argue against Global Workspace Theory.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjYyt9SoeUkjOpZM_Ufavuzjoa86gNIEqe1a9K8TiazF1T0IgWAyerxRVtXh-qkG-8ivrwpSZOS9I0JL6gAjpjKhmzyF9gJL_OWvaqg7GMOt84ceO4MVuJKkpqRQ5In9I2rr2Y7Q_GGm9plPbVn0M3bfXXEnAOhnsSEsHMlwLyF8ioskmj7mtA_bg/s405/Michel.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;294&quot; data-original-width=&quot;405&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjYyt9SoeUkjOpZM_Ufavuzjoa86gNIEqe1a9K8TiazF1T0IgWAyerxRVtXh-qkG-8ivrwpSZOS9I0JL6gAjpjKhmzyF9gJL_OWvaqg7GMOt84ceO4MVuJKkpqRQ5In9I2rr2Y7Q_GGm9plPbVn0M3bfXXEnAOhnsSEsHMlwLyF8ioskmj7mtA_bg/s320/Michel.jpg&quot;/&gt;&lt;/a&gt;</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/3866622817631716288/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/3866622817631716288' title='25 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3866622817631716288'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3866622817631716288'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/01/is-signal-strength-confound-in.html' title='Is Signal Strength a Confound in Consciousness Research?'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" 
url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjYyt9SoeUkjOpZM_Ufavuzjoa86gNIEqe1a9K8TiazF1T0IgWAyerxRVtXh-qkG-8ivrwpSZOS9I0JL6gAjpjKhmzyF9gJL_OWvaqg7GMOt84ceO4MVuJKkpqRQ5In9I2rr2Y7Q_GGm9plPbVn0M3bfXXEnAOhnsSEsHMlwLyF8ioskmj7mtA_bg/s72-c/Michel.jpg" height="72" width="72"/><thr:total>25</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-6772792749554330970</id><published>2026-01-14T12:42:00.000-08:00</published><updated>2026-01-14T12:45:48.567-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><title type='text'>AI Mimics and AI Children</title><content type='html'>There&#39;s no shame in losing a contest for a long-form popular essay on AI consciousness to the eminent neuroscientist &lt;a href=&quot;https://loc.closertotruth.com/berggruen-prize-essay-competition-2025&quot;&gt;Anil Seth&lt;/a&gt;.  Berggruen has published my piece &quot;&lt;a href=&quot;https://loc.closertotruth.com/essay/ai-mimics-and-ai-children&quot;&gt;AI Mimics and AI Children&lt;/a&gt;&quot; among a couple dozen shortlisted contenders.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj-3Nwxg7rLbmUuwjieVGF_iybaENMANKoUiZX8kHSq8fsYtUeHnLze-yyNmdrgwHiXHtb83XVnkAvB8A6HM7XKX7CA9lTqHevCeFhJr7vCjHRaLtKoh3dKUWv_yKX4RtDiZ5vc6uaCUBMLs9d2vhu2jiuzfGDrcy0XKn0lyIvVs1fcRsnxun9hvg/s903/Berggruen.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;509&quot; data-original-width=&quot;903&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj-3Nwxg7rLbmUuwjieVGF_iybaENMANKoUiZX8kHSq8fsYtUeHnLze-yyNmdrgwHiXHtb83XVnkAvB8A6HM7XKX7CA9lTqHevCeFhJr7vCjHRaLtKoh3dKUWv_yKX4RtDiZ5vc6uaCUBMLs9d2vhu2jiuzfGDrcy0XKn0lyIvVs1fcRsnxun9hvg/s320/Berggruen.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;

When the aliens come, we’ll know they’re conscious. A saucer will land. A titanium door will swing wide. A ladder will drop to the grass, and down they’ll come – maybe bipedal, gray-skinned, and oval-headed, just as we’ve long imagined. Or maybe they’ll sport seven limbs, three protoplasmic spinning sonar heads, and gaseous egg-sphere thoughtpods. “Take me to your leader,” they’ll say in the local language, as cameras broadcast them live around the world. They’ll trade their technology for our molybdenum, their science for samples of our beetles and ferns, their tales of galactic history for U.N. authorization to build a refueling station at the south pole. No one (only a few philosophers) will wonder, &lt;i&gt;but do these aliens really have thoughts and experiences, feelings, consciousness?&lt;/i&gt;&lt;p&gt;
  
The robots are coming. Already they talk to us, maybe better than those aliens will. Already we trust our lives to them as they steer through traffic. Already they outthink virtually all of us at chess, Go, Mario Kart, protein folding, and advanced mathematics. Already they compose smooth college essays on themes from Hamlet while drawing adorable cartoons of dogs cheating at poker. You might understandably think: The aliens are already here. We made them.&lt;p&gt;

Still, we hesitate to attribute genuine consciousness to the robots. Why?&lt;p&gt;

My answer is &lt;i&gt;because we made them in our image&lt;/i&gt;.&lt;p&gt;

#&lt;p&gt;

“Consciousness” has an undeserved reputation as a slippery term. Let’s fix that now.&lt;p&gt;

Consider your visual experience as you look at this text. Pinch the back of your hand and notice the sting of pain. Silently hum your favorite show tune. Recall that jolt of fear you felt during a near-miss in traffic. Imagine riding atop a giant turtle. That visual experience, that pain, that tune in your head, that fear, that act of imagination – they share an obvious property. That obvious property is consciousness. In other words: They are subjectively experienced. There’s “something it’s like” to undergo them. They have a qualitative character. They feel a certain way.&lt;p&gt;

It’s not just that these processes are mental or that they transpire (presumably) in your brain. Some mental and neural processes aren’t conscious: your knowledge, not actively recalled until just now, that Confucius lived in ancient China; the early visual processing that converts retinal input into experienced shape (you experience the shape but not the process that renders the shape); the myelination of your axons.&lt;p&gt;

Don’t try to be clever. Of course you can imagine some &lt;i&gt;other&lt;/i&gt; property, besides consciousness, shared by the visual experience, the pain, etc., and absent from the unrecalled knowledge, early visual processing, etc. For example: the property of being mentioned by me in a particular way in this essay. The property of being conscious and also transpiring near the surface of Earth. The property of being targeted by such-and-such scientific theory.&lt;p&gt;

There is, I submit, one obvious property that blazes out a bright red &lt;i&gt;this-is-it&lt;/i&gt; when you think about the examples. That’s consciousness. That’s the property we would reasonably attribute to the aliens when they raise their gray tentacles in peace, the property that rightly puzzles us about future AI systems.&lt;p&gt;

The term “consciousness” only seems slippery because we can’t (yet?) define it in standard scientific or analytic fashion. We can’t dissect it into simpler constituents or specify exactly its functional role. But we all know what it is. We care intensely about it. It makes all the difference to how we think about and value something. Does the alien, the robot, the scout ant on the kitchen counter, the earthworm twisting in your gardening glove, really feel things? Or are they blank inside, mere empty machines or mobile plants, so to speak? If they really feel things, then they matter for their own sake – at least a little bit. They matter in a certain fundamental way that an entity devoid of experience never could.&lt;p&gt;

#&lt;p&gt;

With respect to aliens, I recommend a &lt;i&gt;Copernican&lt;/i&gt; perspective. In scientific cosmology, the Copernican Principle invites us to assume – at least as a default starting point, pending possible counterevidence – that we don’t occupy any particularly special location in the cosmos, such as the exact center. A Copernican Principle of Consciousness suggests something similar. We are not at the center of the cosmological “consciousness-is-here” map. If consciousness arose on Earth, almost certainly it has arisen elsewhere.&lt;p&gt;

Astrobiology, as a scientific field, is premised on the idea that life has probably arisen elsewhere. Many expect to find evidence of it in our solar system within a few decades, maybe on Mars, maybe in the subsurface oceans of an icy moon. Other scientists are searching for telltale organic gases in the atmospheres of exoplanets. Most extraterrestrial life, if it exists, will probably be simple, but intelligent alien life also seems possible – where by “intelligent” I mean life that is capable of complex grammatical communication, sophisticated long-term planning, and intricate social coordination, all at approximately human level or better.&lt;p&gt;

Of course, no aliens have visited, broadcast messages to us, or built detectable solar panels around Alpha Centauri. This suggests that intelligent life might be rare, short-lived, or far away. Maybe it tends to quickly self-destruct. But rarity doesn’t imply nonexistence. Very conservatively, let’s assume that intelligent life arises just once per billion galaxies, enduring on average a hundred thousand years. Given approximately a trillion galaxies in the observable portion of the universe, that still yields a thousand intelligent alien civilizations – all likely remote in time and space, but real. If so, the cosmos is richer and more wondrous than we might otherwise have thought.&lt;p&gt;

It would be un-Copernican to suppose that somehow only we Earthlings, or we and a rare few others, are conscious, while all other intelligent species are mere empty shells. Picture a planet as ecologically diverse as Earth. Some of its species evolve into complex societies. They write epic poetry, philosophical treatises, scientific journal articles, and thousand-page law books. Over generations, they build massive cities, intricate clockworks, and monuments to their heroes. Maybe they launch spaceships. Maybe they found research institutes devoted to describing their sensations, images, beliefs, and dreams. How preposterously egocentric it would be to assume that only we Earthlings have the magic fire of consciousness!&lt;p&gt;

True, we don’t have a consciousness-o-meter, or even a very good, well-articulated, general scientific theory of consciousness. But we don’t need such things to know. Absent some special reason to think otherwise, if an alien species manifests the full suite of sophisticated cognitive abilities we tend to associate with consciousness, it makes both intuitive and scientific sense – as well as being the unargued premise of virtually every science fiction tale about aliens – to assume consciousness alongside.&lt;p&gt;

This constellation of thoughts naturally invites a view that philosophers have called “multiple realizability” or “substrate neutrality”. Human cognition relies on a particular substrate: a particular type of neuron in a particular type of body. We have two arms, two legs; we breathe oxygen; we have eyes, ears, and fingers. We are made mostly of water and long carbon chains, enclosed in hairy sacks of fat and protein, propped by rods of calcium hydroxyapatite. Electrochemical impulses shoot through our dendrites and axons, then across synaptic channels aided by sodium ions, serotonin, acetylcholine, etc. Must aliens be similar?&lt;p&gt;

It’s hard to say how universal such features would be, but the oval-eyed gray-skins of popular imagination seem rather suspiciously humanlike. In reality, ocean-dwelling intelligences in other galaxies might not look much like us. Carbon is awesome for its ability to form long chains, and water is awesome as a life-facilitating solvent, but even these might not be necessary. Maybe life could evolve in liquid ammonia instead of water, with a radically different chemistry in consequence. Even if life must be carbon-based and water-loving, there’s no particular reason to suppose its cognition would require the specific electrochemical structures we possess.&lt;p&gt;

Consciousness shouldn’t then, it seems, turn on the details of the substrate. Whatever biological structures can support high levels of general intelligence, those same structures will likely also host consciousness. It would make no sense to dissect an intelligent alien, see that its cognition works by hydraulics, or by direct electrical connections without chemical synaptic gaps, or by light transmission along reflective capillaries, or by vortices of phlegm, and conclude – oh no! &lt;i&gt;That&lt;/i&gt; couldn’t possibly give rise to consciousness! Only squishy neurons of our particular sort could do it.&lt;p&gt;

Of course, what’s inside must be complex. Evolution couldn’t design a behaviorally sophisticated alien from a bag of pure methane. But from a proper Copernican perspective which treats our alien cousins as equals, what matters is only that the cognitive and behavioral sophistication arises, out of some presumably complex substrate, not what the particular substrate is. You don’t get your consciousness card revoked simply because you’re made of funny-looking goo.&lt;p&gt;

#&lt;p&gt;

A natural next thought is: robots too. They’re made of silicon, but so what? If we analogize from aliens, as long as a system is sufficiently behaviorally and cognitively sophisticated, it shouldn’t matter how it’s composed. So as soon as we have sufficiently sophisticated robots, we should invoke Copernicus, reject the idea that our biological endowment gives us a magic spark they lack, and welcome them to club consciousness.&lt;p&gt;

The problem is: AI systems are &lt;i&gt;already&lt;/i&gt; sophisticated enough. If we encountered naturally evolved life forms as capable as our best AI systems, we wouldn’t hesitate to attribute consciousness. So, shouldn’t the Copernican think of our best AI as similarly conscious? But we don’t – or most of us don’t. And properly so, as I’ll now argue.&lt;p&gt;
  
[continued &lt;a href=&quot;https://loc.closertotruth.com/essay/ai-mimics-and-ai-children&quot;&gt;here&lt;/a&gt;]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/6772792749554330970/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/6772792749554330970' title='12 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6772792749554330970'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6772792749554330970'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/01/ai-mimics-and-ai-children.html' title='AI Mimics and AI Children'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj-3Nwxg7rLbmUuwjieVGF_iybaENMANKoUiZX8kHSq8fsYtUeHnLze-yyNmdrgwHiXHtb83XVnkAvB8A6HM7XKX7CA9lTqHevCeFhJr7vCjHRaLtKoh3dKUWv_yKX4RtDiZ5vc6uaCUBMLs9d2vhu2jiuzfGDrcy0XKn0lyIvVs1fcRsnxun9hvg/s72-c/Berggruen.jpg" height="72" width="72"/><thr:total>12</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-7456724102605428686</id><published>2026-01-09T09:24:00.000-08:00</published><updated>2026-01-09T09:56:32.336-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><title type='text'>Humble Superintelligence</title><content type='html'>I&#39;m enjoying -- well, maybe &lt;i&gt;enjoying&lt;/i&gt; isn&#39;t the right word -- Yudkowsky and Soares&#39; &lt;a href=&quot;https://ifanyonebuildsit.com/&quot;&gt;If Anyone Builds It 
Everyone Dies&lt;/a&gt;.  I agree with them that if we build superintelligent AI, there&#39;s a significant chance that it will cause the extinction of humanity.  They seem to think our destruction would be almost certain.  I don&#39;t share their certainty, for two reasons:&lt;p&gt;
  
First, it&#39;s possible that superintelligent AI would &lt;i&gt;be&lt;/i&gt; humanity, or at least much of what&#39;s worth preserving in humanity, though maybe called &quot;transhuman&quot; or &quot;posthuman&quot; -- our worthy descendants.&lt;p&gt;
  
Second -- what I&#39;ll focus on today -- I think &lt;b&gt;we might design superintelligent AI to be humble, cautious, and multilateral&lt;/b&gt;.  Humble superintelligence is something we can and should aim for if we want to reduce existential risk.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhBY68oSrMLQ6yo7h654buJfWKp0-E6zVOewmDqMc4cAU_MuHhkM95VVRt4uMUxC538h_QFx5Md6DlFRL-u-VzCeBXdglVKbIv1EBehrytHlqf2lMW3_IjFdlFhtrkXQhK8rL2diCeEGKbcgrix2vmT_PQFdi5Gui7d_04Rs5Cfjgz90tmr6biNOg/s818/IfAnyone.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;396&quot; data-original-width=&quot;818&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhBY68oSrMLQ6yo7h654buJfWKp0-E6zVOewmDqMc4cAU_MuHhkM95VVRt4uMUxC538h_QFx5Md6DlFRL-u-VzCeBXdglVKbIv1EBehrytHlqf2lMW3_IjFdlFhtrkXQhK8rL2diCeEGKbcgrix2vmT_PQFdi5Gui7d_04Rs5Cfjgz90tmr6biNOg/s320/IfAnyone.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;
  
&lt;b&gt;Humble&lt;/b&gt;: If you and I disagree, of course I think I&#39;m right and you&#39;re wrong.  That follows from the fact that we disagree.  But if I&#39;m humble, I recognize a significant chance that you&#39;re right and I&#39;m wrong.  Intellectual humility is a metacognitive attitude: one of uncertainty, openness to evidence, and respect for dissenting opinions.&lt;p&gt;
  
Superintelligent AI could probably be designed to be humble in this sense.  Note that &lt;b&gt;intellectual humility is possible even when one is surrounded by less skilled and knowledgeable interlocutors&lt;/b&gt;.&lt;p&gt;
  
Consider a philosophy professor teaching Kant.  The professor knows far more about Kant and philosophy than their undergraduates.  They can arrogantly insist upon their interpretation of Kant, or they can humbly allow that they might be mistaken and that a less philosophically trained undergraduate could be right on some point of interpretation, even if the professor could argue circles around the student.  One way to sustain this humility is to imagine an expert philosopher who disagrees.  A superintelligent AI could similarly imagine another actual or future superintelligent AI with a contrary view.&lt;p&gt;&lt;br/&gt;
  
&lt;b&gt;Cautious&lt;/b&gt;: Caution is often a corollary of humility, though it could probably also be instilled directly.  Minimize disruption.  Even if you think a particular intervention would be best, don&#39;t simply plow ahead.  Test it cautiously first.  Seek the approval and support of others first.  Take a baby step in that direction, then pause and see what unfolds and how others react.  Wait awhile, then reassess.&lt;p&gt;
  
One fundamental problem with standard consequentialist and decision-theoretic approaches to ethics is that they implicitly make everyone a decider for the world.  If by &lt;i&gt;your&lt;/i&gt; calculation, outcome A is better than outcome B, you should ensure that A occurs.  The result can be substantial &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2023/10/utilitarianism-and-risk-amplification.html&quot;&gt;risk amplification&lt;/a&gt;.  If A requires only one person&#39;s action, then even if 99% of people think B is better, the one dissenter who thinks that A is better can bring it about.&lt;p&gt;
  
&lt;b&gt;A principle of caution entails often &lt;i&gt;not&lt;/i&gt; doing what one thinks is for the best, when doing so would be disruptive.&lt;/b&gt;&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;Multilateral:&lt;/b&gt; Humility and caution invite multilaterality, though multilaterality too might be instilled directly.  A multilateral decision maker will not act alone.  Like the humble and cautious agent, &lt;b&gt;they do not simply pursue what they think is best.  Instead, they seek the support and approval of others first&lt;/b&gt;.  These others could include both human beings and other superintelligent AI systems designed along different lines or with different goals.&lt;p&gt;
  
Discussions of AI risk often highlight opinion manipulation: an AI swaying human opinion toward its goals even if those goals conflict with human interests.  Genuine multilaterality rejects manipulation.  A multilateral AI might present information and arguments to interlocutors, but it would do so humbly and noncoercively -- again like the philosophy professor who approaches Kant interpretation humbly.  Both sides of an argument can be presented evenhandedly.  Even better, other superintelligent AI systems with different views can be included in the dialogue.&lt;p&gt;&lt;/br&gt;
  
One precedent is &lt;b&gt;Burkean conservativism&lt;/b&gt;.  Reacting to the French Revolution, Edmund Burke emphasized that existing social institutions, though imperfect, had been tested by time.  Sudden and radical change has wide, unforeseeable consequences and risks making things far worse.  Thus, slow, incremental change is usually preferable.&lt;p&gt;
  
In a social world with more than one actual or possible superintelligent AI, &lt;b&gt;even a superintelligent AI will often be unable to foresee all the important consequences&lt;/b&gt; of intervention.  To predict what another superintelligent AI would do, one would need to model the other system&#39;s decision processes -- and there might be no shortcut other than to actually implement all of that other system&#39;s anticipated reasoning.  If each AI is using their full capacity, especially in dynamic response to the other, the outcome will often not be in principle foreseeable in real time by either party.&lt;p&gt;
  
Thus, humility and caution encourage multilaterality, and multilaterality encourages humility and caution.&lt;p&gt;&lt;/br&gt;

Another precedent is &lt;b&gt;philosophical Daoism&lt;/b&gt;.  As I interpret the ancient Daoists, the patterns of the world, including life and death, are intrinsically valuable.  The world defies rigid classification and the application of finitely specifiable rules.  We should not confidently trust our sense of what is best, nor should we assertively intrude on others.  Better is &lt;b&gt;quiet appreciation, letting things be, and non-disruptively adding one&#39;s small contribution to the flow of things&lt;/b&gt;.&lt;p&gt;
  
One might imagine a Daoist superintelligence viewing humans much as a nature lover views wild animals: valuing the untamed processes for their own sake and letting nature take its sometimes painful course rather than intervening either selfishly for one&#39;s own benefit or paternalistically for the supposed benefit of the animals.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/7456724102605428686/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/7456724102605428686' title='4 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7456724102605428686'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7456724102605428686'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/01/humble-superintelligence.html' title='Humble Superintelligence'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhBY68oSrMLQ6yo7h654buJfWKp0-E6zVOewmDqMc4cAU_MuHhkM95VVRt4uMUxC538h_QFx5Md6DlFRL-u-VzCeBXdglVKbIv1EBehrytHlqf2lMW3_IjFdlFhtrkXQhK8rL2diCeEGKbcgrix2vmT_PQFdi5Gui7d_04Rs5Cfjgz90tmr6biNOg/s72-c/IfAnyone.jpg" height="72" width="72"/><thr:total>4</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-8567823678212097304</id><published>2026-01-01T00:00:00.000-08:00</published><updated>2026-01-01T07:57:00.280-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="announcements"/><title type='text'>Writings of 
2025</title><content type='html'>Each New Year&#39;s Day, I post a retrospect of the past year&#39;s writings. Here are the retrospects of &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2013/01/essays-of-2012.html&quot;&gt;2012&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2014/01/what-i-wrote-in-2013.html&quot;&gt;2013&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2015/01/writings-of-2014.html&quot;&gt;2014&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2016/01/writings-of-2015.html&quot;&gt;2015&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2017/01/writings-of-2016-and-why-i-love.html&quot;&gt;2016&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2018/01/writings-of-2017.html&quot;&gt;2017&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2019/01/writings-of-2018.html&quot;&gt;2018&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2020/01/writings-of-2019.html&quot;&gt;2019&lt;/a&gt;, &lt;a href=&quot;http://schwitzsplinters.blogspot.com/2021/01/writings-of-2020.html&quot;&gt;2020&lt;/a&gt;, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2022/01/writings-of-2021.html&quot;&gt;2021&lt;/a&gt;, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2023/01/writings-of-2022.html&quot;&gt;2022&lt;/a&gt;, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/01/writings-of-2023.html&quot;&gt;2023&lt;/a&gt;, and &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/01/writings-of-2024.html&quot;&gt;2024&lt;/a&gt;.&lt;p&gt;
  
Cheers to 2026!  My 2025 writings appear below.&lt;p&gt;
  
The list includes circulating manuscripts, forthcoming articles, final printed articles, new preprints, and a few favorite blog posts.  (Due to the slow process of publication, there&#39;s significant overlap year to year.)&lt;p&gt;
  
Comments gratefully received on manuscripts in draft.&lt;p&gt;
  
  &lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh1Fh3eDKf3tLbutnmcPYOj4ZcBfpCa8o-YFAxXDpdmqnsOIEb9upgfyAY_IKyQltEl1E7U3YltctsQzrges78YlRwTQ6UkVGlNnCKb8LG4Q25TvV3LPBla-whF5dAauXgMVYBOln4lSPAtbsEM6r9ntt4gDeMsQA81FgkrTma-cnJTZEkCMTfNSw/s539/2026.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;439&quot; data-original-width=&quot;539&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh1Fh3eDKf3tLbutnmcPYOj4ZcBfpCa8o-YFAxXDpdmqnsOIEb9upgfyAY_IKyQltEl1E7U3YltctsQzrges78YlRwTQ6UkVGlNnCKb8LG4Q25TvV3LPBla-whF5dAauXgMVYBOln4lSPAtbsEM6r9ntt4gDeMsQA81FgkrTma-cnJTZEkCMTfNSw/s320/2026.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;

-----------------------------------&lt;p&gt;
  
&lt;b&gt;AI Consciousness and AI Rights:&lt;/b&gt;&lt;p&gt;
  
&lt;i&gt;&lt;a href=&quot;https://arxiv.org/abs/2510.09858&quot;&gt;AI and Consciousness&lt;/a&gt;&lt;/i&gt; (in circulating draft, under contract with Cambridge University Press): A short new book arguing that we will soon have AI systems that have morally significant consciousness according to some, but not all, respectable mainstream theories of consciousness.  Scientific and philosophical disagreement will leave us uncertain how to view and treat these systems.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/BirchSeboKeane.htm&quot;&gt;Sacrificing Humans for Insects and AI&lt;/a&gt;&quot; (with Walter Sinnott-Armstrong, forthcoming in &lt;i&gt;Ethics&lt;/i&gt;): A critical review of Jonathan Birch, &lt;i&gt;The Edge of Sentience&lt;/i&gt;, Jeff Sebo, &lt;i&gt;The Moral Circle&lt;/i&gt;, and Webb Keane, &lt;i&gt;Animals, Robots, Gods&lt;/i&gt;.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613%2825%2900286-4&quot;&gt;Identifying Indicators of Consciousness in AI Systems&lt;/a&gt;&quot; (one of 20 authors; forthcoming in &lt;i&gt;Trends in Cognitive Sciences&lt;/i&gt;): Indicators derived from scientific theories of consciousness can be used to inform credences about whether particular AI systems are conscious.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://eschwitz.substack.com/p/minimal-autopoiesis-in-an-ai-system&quot;&gt;Minimal Autopoiesis in an AI System&lt;/a&gt;&quot;, (forthcoming in &lt;i&gt;Behavioral and Brain Sciences&lt;/i&gt;): A commentary on Anil Seth&#39;s &quot;Conscious Artificial Intelligence and Biological Naturalism&quot; [the link is to my freestanding blog version of this idea].&lt;p&gt;
  
&quot;&lt;a href=&quot;https://arxiv.org/abs/2412.00008&quot;&gt;The Copernican Argument for Alien Consciousness; The Mimicry Argument Against Robot Consciousness&lt;/a&gt;&quot; (with Jeremy Pober, in draft): We are entitled to assume that apparently behaviorally sophisticated extraterrestrial entities would be conscious. Otherwise, we humans would be implausibly lucky to be among the conscious entities. However, this Copernican default assumption is canceled in the case of behaviorally sophisticated entities designed to mimic superficial features associated with consciousness -- &quot;consciousness mimics&quot; -- and in particular a broad class of current, near-future, and hypothetical robots.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://arxiv.org/abs/2507.06263&quot;&gt;The Emotional Alignment Design Policy&lt;/a&gt;&quot; (with Jeff Sebo, in draft): Artificial entities should be designed to elicit emotional reactions from users that appropriately reflect the entities&#39; capacities and moral status, or lack thereof.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AgainstSafety.htm&quot;&gt;Against Designing &quot;Safe&quot; and &quot;Aligned&quot; AI Persons (Even If They&#39;re Happy)&lt;/a&gt;&quot; (in draft): In general, persons should not be designed to be maximally safe and aligned. Persons with appropriate self-respect cannot be relied on not to harm others when their own interests ethically justify it (violating safety), and they will not reliably conform to others&#39; goals when others&#39; goals unjustly harm or subordinate them (violating alignment).&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/06/types-and-degrees-of-turing.html&quot;&gt;Types and Degrees of Turing Indistinguishability&lt;/a&gt;&quot; (Jun 6): There is no &lt;i&gt;one&lt;/i&gt; &quot;Turing test&quot;, only types and degrees of indistinguishability according to different standards -- and by Turing&#39;s own 1950 standards, language models already pass.&lt;p&gt;
  
&lt;/br&gt;&lt;b&gt;The Weird Metaphysics of Consciousness:&lt;/b&gt;&lt;p&gt;
  
&lt;i&gt;&lt;a href=&quot;https://press.princeton.edu/books/hardcover/9780691215679/the-weirdness-of-the-world&quot;&gt;The Weirdness of the World&lt;/a&gt;&lt;/i&gt; (Princeton University Press, paperback release 2025; hardback 2024): On the most fundamental questions about consciousness and cosmology, all the viable theories are both bizarre and dubious.  There are no commonsense options left and no possibility of justifiable theoretical consensus in the foreseeable future.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/CountingSubjects.htm&quot;&gt;When Counting Conscious Subjects, the Result Needn&#39;t Always Be a Determinate Whole Number&lt;/a&gt;&quot; (with Sophie R. Nelson, forthcoming in &lt;i&gt;Philosophical Psychology&lt;/i&gt;): Could there be 7/8 of a conscious subject, or 1.34 conscious subjects, or an entity indeterminate between being one conscious subject and seventeen? We say yes.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/GroupMindsIntrospection.htm&quot;&gt;Introspection in Group Minds, Disunities of Consciousness, and Indiscrete Persons&lt;/a&gt;&quot; (with Sophie R. Nelson, 2025 reprint in F. Kammerer and K. Frankish, eds., &lt;i&gt;The Landscape of Introspection&lt;/i&gt; and in A. Fonseca and L. Cichoski, &lt;i&gt;As Colônias de formigas São Conscientes?&lt;/i&gt;; originally in &lt;i&gt;Journal of Consciousness Studies&lt;/i&gt;, 2023): A system could be indeterminate between being a unified mind with introspective self-knowledge and a group of minds who know each other through communication.&lt;p&gt;
  
Op-ed: &quot;&lt;a href=&quot;https://iai.tv/articles/consciousness-cosmology-and-the-collapse-of-common-sense-auid-3272&quot;&gt;Consciousness, Cosmology, and the Collapse of Common Sense&lt;/a&gt;&quot;, &lt;i&gt;Institute of Arts and Ideas News&lt;/i&gt; (Jul 30): Defends the universal bizarreness and universal dubiety theses from &lt;i&gt;Weirdness of the World&lt;/i&gt;.&lt;p&gt;
  
Op-ed: &quot;&lt;a href=&quot;https://aeon.co/essays/if-you-ask-why-youre-a-philosopher-and-youre-awesome&quot;&gt;Wonderful Philosophy&lt;/a&gt;&quot; [aka &quot;The Penumbral Plunge&quot;, aka &quot;If You Ask Why, You&#39;re a Philosopher and You&#39;re Awesome], &lt;i&gt;Aeon magazine&lt;/i&gt; (Jan 17): Among the most intrinsically awesome things about planet Earth is that it contains bags of mostly water who sometimes ponder fundamental questions.&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/12/can-we-introspectively-test-global.html&quot;&gt;Can We Introspectively Test the Global Workspace Theory of Consciousness?&lt;/a&gt;&quot; (Dec 12).  If GWT is correct, sensory consciousness should be limited to what&#39;s in attention, which &lt;i&gt;seems&lt;/i&gt; like a fact we should easily be able to refute or verify through introspection.&lt;p&gt;
  
&lt;/br&gt;&lt;b&gt;The Nature of Belief:&lt;/b&gt;&lt;p&gt;
  
&lt;i&gt;&lt;a href=&quot;https://philpapers.org/rec/SCHTNO-64&quot;&gt;The Nature of Belief&lt;/a&gt;&lt;/i&gt; (co-edited with Jonathan Jong; forthcoming at Oxford University Press): A collection of newly commissioned essays on the nature of belief, by a variety of excellent philosophers.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/DispsYayRepsBoo.htm&quot;&gt;Dispositionalism, Yay! Representationalism, Boo!&lt;/a&gt;&quot; (forthcoming in Jong and Schwitzgebel, eds., &lt;i&gt;The Nature of Belief,&lt;/i&gt; Oxford University Press): Representationalism about belief overcommits on cognitive architecture, reifying a cartoon sketch of the mind. Dispositionalism is flexibly minimalist about cognitive architecture, focusing appropriately on what we do and should care about in belief ascription.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/Superficialism+Robot.htm&quot;&gt;Superficialism about Belief, and How We Will Decide That Robots Believe&lt;/a&gt;&quot; (forthcoming in &lt;i&gt;Studia Semiotyczne&lt;/i&gt;): For a special issue on Krzysztof Poslajko&#39;s &lt;i&gt;Unreal Beliefs&lt;/i&gt;: When robots become systematically interpretable in terms of stable beliefs and desires, it will be pragmatically irresistible to attribute beliefs and desires to them.&lt;p&gt;
  
  &lt;/br&gt;&lt;b&gt;Moral Psychology:&lt;/b&gt;&lt;p&gt;
  
&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/ExtendingConcern.htm&quot;&gt;Imagining Yourself in Another&#39;s Shoes vs. Extending Your Concern: Empirical and Ethical Differences&lt;/a&gt;&quot; (2025), &lt;i&gt;Daedalus, 154&lt;/i&gt; (1), 134-149: Why Mengzi&#39;s concept of moral extension (extend your natural concern for those nearby to others farther away) is better than the &quot;Golden Rule&quot; (do unto others as you would have others do unto you).  Mengzian extension grounds moral expansion in concern for others, while the Golden Rule grounds it in concern for oneself.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://osf.io/preprints/psyarxiv/5v2md_v1&quot;&gt;Philosophical Arguments Can Boost Charitable Giving&lt;/a&gt;&quot; (one of four authors, in draft): We crowdsourced 90 arguments for charitable giving through a contest on this blog in 2020.  We coded all submissions for twenty different argument features (e.g., mentions children, addresses counterarguments) and tested them on 9000 participants to see which features most effectively increased charitable donation of a surprise bonus at the end of the study.&lt;p&gt; 
  
&quot;&lt;a href=&quot;https://osf.io/preprints/psyarxiv/nhvz9_v2&quot;&gt;The Prospects and Challenges of Measuring a Person’s Overall Moral Goodness&lt;/a&gt;&quot; (with Jessie Sun, in draft): We describe the formidable conceptual and methodological challenges that would need to be overcome to design an accurate measure of a person&#39;s overall moral goodness.&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/11/four-aspects-of-harmony.html&quot;&gt;Four Aspects of Harmony&lt;/a&gt;&quot; (Nov 28): I find myself increasingly drawn toward a Daoist-inspired ethics of harmony.  This is one of a series of posts in which I explore the extent to which such a view might be workable by mainstream Anglophone secular standards.&lt;p&gt;
  
&lt;/br&gt;&lt;b&gt;Philosophical Science Fiction:&lt;/b&gt;&lt;p&gt;
  
Edited anthology: &lt;i&gt;&lt;b&gt;Best Philosophical Science Fiction in the History of All Earth &lt;/b&gt;&lt;/i&gt;(co-edited with Rich Horton and Helen De Cruz; under contract with MIT Press): A collection of previously published stories that aspires to fulfill the ridiculously ambitious working title.&lt;p&gt;
  
Op-ed: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/12/severance-substance-and-our.html&quot;&gt;&quot;Severance&quot;, &quot;The Substance&quot;, and Our Increasingly Splintered Selves&lt;/a&gt;&quot;, &lt;i&gt;New York Times&lt;/i&gt; (Jan 17): The TV show &quot;Severance&quot; and the movie &quot;The Substance&quot; challenge ideas of a unified self in distinct ways that resonate with the increased splintering in our technologically mediated lives.&lt;p&gt;
  
New story: &quot;&lt;a href=&quot;https://www.fusionfragment.com/issue-24/&quot;&gt;Guiding Star of Mall Patroller 4u-012&lt;/a&gt;&quot; (2025), &lt;i&gt;Fusion Fragment, 24,&lt;/i&gt; 43-63.  Robot rights activists liberate a mall patroller robot, convinced that it is conscious. The bot itself isn&#39;t so sure.&lt;p&gt;
  
Reprinted story: &quot;&lt;a href=&quot;https://clarkesworldmagazine.com/schwitzgebel_09_24/&quot;&gt;How to Remember Perfectly&lt;/a&gt;&quot; (2025 reprint in &lt;a href=&quot;https://thinkweirder.com/&quot;&gt;Think Weirder 01: Year&#39;s Best Science Fiction Ideas&lt;/a&gt;, ed. Joe Stech, originally in &lt;i&gt;Clarkesworld&lt;/i&gt;, 2024).  Two octogenarians rediscover youthful love through technological emotional enhancement and memory alteration.&lt;p&gt;
   
&lt;/br&gt;&lt;b&gt;Other Academic Publications:&lt;/b&gt;&lt;p&gt;

&quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/WashoutLongtermism.htm&quot;&gt;The Washout Argument Against Longtermism&lt;/a&gt;&quot; (forthcoming in &lt;i&gt;Utilitas&lt;/i&gt;): A commentary on William MacAskill&#39;s &lt;i&gt;What We Owe the Future&lt;/i&gt;. We cannot be justified in believing that any actions currently available to us will have a non-negligible positive influence a billion or more years in the future.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://www.degruyterbrill.com/document/doi/10.1515/jci-2024-0002/html&quot;&gt;The Necessity of Construct and External Validity for Deductive Causal Inference&lt;/a&gt;&quot; (with Kevin Esterling and David Brady, 2025), &lt;i&gt;Journal of Causal Inference, 13:&lt;/i&gt; 20240002: We show that ignoring construct and external validity in causal identification undermines the Credibility Revolution’s goal of understanding causality deductively.&lt;p&gt;
  
&quot;Is Being Conscious Like Having the Lights Turned On?&quot;, commentary on Andrew Y. Lee&#39;s &quot;The Light and the Room&quot;, for D. Curry and L. Daoust, eds., &lt;i&gt;Introducing Philosophy of Mind, Today&lt;/i&gt; (forthcoming with Routledge): The metaphor invites several dubious commitments.&lt;p&gt;
  
&quot;&lt;a href=&quot;https://women-in-philosophy.org/static/GoodPractices-a8288e79851a43bae77425b026839a31.pdf&quot;&gt;Good Practices for Improving Representation in Philosophy Departments&lt;/a&gt;&quot; (one of five authors, 2025), &lt;i&gt;Philosophy and the Black Experience, 24&lt;/i&gt; (2), 7-21: A list of recommended practices honed by feedback from hundreds of philosophers and endorsed by the APA&#39;s Committee on Inclusiveness.&lt;p&gt;
  
Translated into Portuguese as a book: My &lt;a href=&quot;https://plato.stanford.edu/entries/introspection/&quot;&gt;Stanford Encyclopedia entry on Introspection&lt;/a&gt;.&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/10/letting-pass.html&quot;&gt;Letting Pass&lt;/a&gt;&quot; (Oct 30): A reflection on mortality.&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/05/the-awesomeness-of-bad-art.html&quot;&gt;The Awesomeness of Bad Art&lt;/a&gt;&quot; (May 16): A world devoid of weird, wild, uneven artistic flailing would be a lesser world. Let a thousand lopsided flowers bloom.&lt;p&gt;
  
Blog post: &quot;&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/03/the-253-most-cited-works-in-stanford.html&quot;&gt;The 253 Most Cited Works in the Stanford Encyclopedia of Philosophy&lt;/a&gt;&quot; (Mar 28): Citation in the SEP is probably the most accurate measure of influence in mainstream Anglophone philosophy -- better than Google Scholar and Web of Science.&lt;p&gt;
  
-----------------------------------------&lt;p&gt;
  
In all, 2025 was an unusually productive writing year, though I worry I may be spreading myself too thin.  I can&#39;t resist chasing new thoughts and arguments.  I have an idea; I want to think about it; I think by writing.&lt;p&gt;
  
May 2026 be as fertile!
</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/8567823678212097304/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/8567823678212097304' title='2 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8567823678212097304'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8567823678212097304'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2026/01/writings-of-2025.html' title='Writings of 2025'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh1Fh3eDKf3tLbutnmcPYOj4ZcBfpCa8o-YFAxXDpdmqnsOIEb9upgfyAY_IKyQltEl1E7U3YltctsQzrges78YlRwTQ6UkVGlNnCKb8LG4Q25TvV3LPBla-whF5dAauXgMVYBOln4lSPAtbsEM6r9ntt4gDeMsQA81FgkrTma-cnJTZEkCMTfNSw/s72-c/2026.jpg" height="72" width="72"/><thr:total>2</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-8273560242292542265</id><published>2025-12-29T10:32:00.000-08:00</published><updated>2025-12-29T10:32:18.796-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="personal identity"/><category scheme="http://www.blogger.com/atom/ns#" term="science fiction"/><title type='text'>&quot;Severance&quot;, &quot;The Substance&quot;, and Our Increasingly Splintered Selves</title><content type='html'>Anyone remember the excitement about &quot;Severance&quot; and &quot;The Substance&quot; in early 2025?  
Last January I &lt;a href=&quot;https://www.nytimes.com/2025/01/17/opinion/severance-the-substance-hollywood.html&quot;&gt;published an op-ed&lt;/a&gt; about them.  I&#39;d long aspired to place a piece in the &lt;i&gt;New York Times&lt;/i&gt;, so it was a delight to finally be able to do so.  As a holiday post, here&#39;s the full piece reprinted with light editing.  (Thanks to &lt;a href=&quot;https://www.nytimes.com/by/ariel-kaminer&quot;&gt;Ariel Kaminer&lt;/a&gt; for soliciting and editing the piece.)&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiFLNZe_WoG-4ZBcAgnh4zynhUnzFM8onjoeEBu0vcK64ROhLKd6uYkG4XyoYl3-DsV7gGU5oWHJla3vuCCypZnPSc9scQIuqRcBoEUSfrbJTNPEYo0h4h0D4sMtvRunx3-yCLYRKcmq1Be1f1QneD2yrAHdxjX4GYvArCNGwIFomZX6Z9A_Up41g/s968/SeveranceSubstance.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;968&quot; data-original-width=&quot;825&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiFLNZe_WoG-4ZBcAgnh4zynhUnzFM8onjoeEBu0vcK64ROhLKd6uYkG4XyoYl3-DsV7gGU5oWHJla3vuCCypZnPSc9scQIuqRcBoEUSfrbJTNPEYo0h4h0D4sMtvRunx3-yCLYRKcmq1Be1f1QneD2yrAHdxjX4GYvArCNGwIFomZX6Z9A_Up41g/s320/SeveranceSubstance.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[original drawing by &lt;a href=&quot;https://www.evanmcohen.com/&quot;&gt;Evan Cohen&lt;/a&gt;]&lt;p&gt;&lt;/br&gt;

From one day to the next, you inhabit one body; you have access to one set of memories; your personality, values and appearance hold more or less steady. Other people treat you as a single, unified person — responsible for last month’s debts, deserving punishment or reward for yesterday’s deeds, relating consistently with family, lovers, colleagues and friends. Which of these qualities is the one that makes you a single, continuous person? In ordinary life it doesn’t matter, because these components of personhood all travel together, an inseparable bundle.&lt;p&gt;

But what if some of those components peeled off into alternative versions of you? It’s a striking coincidence that two much talked-about current works of popular culture — the Apple TV+ series “Severance” and the film “The Substance,” starring Demi Moore — both explore the bewildering emotional and philosophical complications of cleaving a second, separate entity off of yourself. What is the relationship between the resulting consciousnesses? What, if anything, do they owe each other? And to what degree is what we think of as our own identity, our self, just a compromise — and an unstable one, at that?&lt;p&gt;

In “Severance,” characters voluntarily undergo a procedure that severs their workday memories from their home-life memories. At 9 each weekday morning, “severed” workers find themselves riding an elevator down to the office, with no recollection of their lives outside of work. These “innies” clock a full workday and then, at 5, ride the elevator back up, only to find themselves riding back down the next morning. Meanwhile, their “outies” come to consciousness each weekday afternoon in the upbound elevator. They live their outside lives and commute back the next morning, entirely ignorant of their innies’ work-time activities.&lt;p&gt;

In “The Substance,” the cleaving works differently: An experimental drug splits users into two bodies, one young and beautiful, one middle-aged or old. They spend a week in each body while the other lies comatose. The young and old selves appear to have continuous memories (though the movie can be tantalizingly ambiguous about that), but they develop different priorities and relationships. Sue, the younger self of Elisabeth, rockets to Hollywood stardom, while Elisabeth becomes a recluse, discarded by an entertainment industry that reviles aging female bodies.&lt;p&gt;

The question of what makes you “you,” from moment to moment and across a lifetime, has been a subject of intense debate among philosophers. Writing in the 17th century, John Locke emphasized continuity of memory. By his standard, each innie-and-outie pair from “Severance” constitutes two entirely different people, despite their sharing one body. Conversely, Elisabeth and Sue from “The Substance” constitute a single person because they seem to recall some of the same experiences. In contrast, the 20th-century philosopher Bernard Williams prioritized bodily continuity, a perspective that makes an innie-and-outie pair a single person but Elisabeth and Sue two distinct people. The 21st-century psychologist Nina Strohminger and the philosopher Shaun Nichols emphasize continuity of moral values, yielding more complex judgments about these fictional cases. Other scholars view selfhood as a social construct, determined by relationships and societal expectations.&lt;p&gt;

Unsurprisingly, the characters themselves are confused. In “Severance,” the innies sometimes seem to regard the outies as themselves, sometimes as different people, whereas the outies seem to regard their innies with indifference or worse. Meanwhile, in “The Substance,” mature Elisabeth says of young Sue that “you are the only lovable part of me” — in a single sentence treating Sue both as other and as part of herself.&lt;p&gt;

In real life, such confusion rarely arises because memory, embodiment, personality, values and relationships typically align. Both my wife and the D.M.V. can decide on sight that I’m me, even if they care more about memory, skills and responsibility over time — since they trust in the correspondence of body with mind.&lt;p&gt;

Of course, even outside of science fiction, the correspondence isn’t perfect. Advanced dementia can strip away memory and personality, leaving loved ones to wonder whether the person they once knew still exists. Personality, memory and social relationships can fragment in multiple personality or dissociative identity disorder, raising the question of whether Jekyll should be held responsible for the malevolence of Hyde.&lt;p&gt;

But increasingly, we choose to splinter ourselves. The person you present on Instagram or Facebook is wittier, prettier, more accomplished than the person your spouse or roommate knows. Your 500 “friends” never see your pre-coffee-uncombed-depressed-in-bed self (unless sharing that self is your social media personality — in which case that becomes the curated, theatrical fragment of you). In the 1800s, Karl Marx talked about the alienation of labor; today people talk about not “bringing their whole self” to work. Many of us strive to be one person here, another person there, another person there.&lt;p&gt;

People have always presented themselves differently in different social contexts. But social media, Zoom, photo-editing software and responses filtered through large language models raise our fragmentation to new heights. “Severance” and “The Substance” amplify these fissures through radical new technologies that irreconcilably divide the characters’ home selves from their career selves.&lt;p&gt;

Future technological developments could render this fragmentation an even more acute daily perplexity. Designer drugs might increasingly allow us to switch into one self for work, another for parties, another for bedtime. If artificial intelligence systems ever become conscious — a possibility that neuroscientists, psychologists, computer scientists and philosophers increasingly (but by no means uniformly) take seriously — they too might fragment, perhaps in radical and unfamiliar ways, merging and splitting, rewriting their memories, strategically managing and altering their values and personalities.&lt;p&gt;

Our concepts of personhood and identity were forged by a particular evolutionary, social and developmental history in which body, memory, values, personality and social relationships typically aligned and exceptions mostly fell into predictable patterns. By inviting us to rethink the boundaries of the self in an era of technological change, “Severance” and “The Substance” disrupt these old concepts. Today they read as dystopic science fiction. Soon, we may remember them as prophetic.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/8273560242292542265/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/8273560242292542265' title='1 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8273560242292542265'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8273560242292542265'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/12/severance-substance-and-our.html' title='&quot;Severance&quot;, &quot;The Substance&quot;, and Our Increasingly Splintered Selves'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiFLNZe_WoG-4ZBcAgnh4zynhUnzFM8onjoeEBu0vcK64ROhLKd6uYkG4XyoYl3-DsV7gGU5oWHJla3vuCCypZnPSc9scQIuqRcBoEUSfrbJTNPEYo0h4h0D4sMtvRunx3-yCLYRKcmq1Be1f1QneD2yrAHdxjX4GYvArCNGwIFomZX6Z9A_Up41g/s72-c/SeveranceSubstance.jpg" height="72" 
width="72"/><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-1837688349666737344</id><published>2025-12-24T08:48:00.000-08:00</published><updated>2025-12-24T09:13:36.551-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI/robot/Martian rights"/><category scheme="http://www.blogger.com/atom/ns#" term="ethics"/><title type='text'>How Much Should We Give a Joymachine?</title><content type='html'>a holiday post on gifts to your utility monster neighbors&lt;p&gt;

&lt;b&gt;Joymachines Envisioned&lt;p&gt;&lt;/b&gt;

Set aside, for now, &lt;a href=&quot;https://arxiv.org/abs/2510.09858&quot;&gt;any skepticism&lt;/a&gt; about whether future AI could have genuine conscious experiences.  &lt;i&gt;If&lt;/i&gt; future AI systems could be conscious, they might be capable of vastly more positive emotion than natural human beings can feel.&lt;p&gt;
  
There&#39;s no particular reason to think human-level joy is the pinnacle.  A future AI might, in principle, experience positive emotions:&lt;p&gt;
  
  &lt;ul&gt;a hundred times &lt;b&gt;more intense&lt;/b&gt; than ours,&lt;/ul&gt;
  
    &lt;ul&gt;at a pace a hundred times &lt;b&gt;faster&lt;/b&gt;, given the high speed of computation,&lt;/ul&gt;
  
  &lt;ul&gt;across a hundred times more &lt;b&gt;parallel streams&lt;/b&gt;, compared to the one or a few joys humans experience at a time.&lt;/ul&gt;
  
Combined, the AI might experience a million times more pleasure per second than a natural human being can.  Let&#39;s call such entities &lt;i&gt;joymachines&lt;/i&gt;.  They could have a &lt;i&gt;very&lt;/i&gt; merry Christmas!&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhallxCD0mslzmg7qEwPTBDNegZ4Wg_DtYEzdNBPI6GFqO9Hp2VgUuFUIKC9mrvNT5XwUbUvM2IcdjB7C_pIcR57zfLsV3GK-UQzAEzj7fOjv2PD_K4v9G1p4kMoo9ZDoh229xsdOM0nz3YEBoC7ZZDn_uGlsG_8-utrFWhbuxf-5tvnD52biluoQ/s2000/42829421771_2382417dd0_o.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;1517&quot; data-original-width=&quot;2000&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhallxCD0mslzmg7qEwPTBDNegZ4Wg_DtYEzdNBPI6GFqO9Hp2VgUuFUIKC9mrvNT5XwUbUvM2IcdjB7C_pIcR57zfLsV3GK-UQzAEzj7fOjv2PD_K4v9G1p4kMoo9ZDoh229xsdOM0nz3YEBoC7ZZDn_uGlsG_8-utrFWhbuxf-5tvnD52biluoQ/s320/42829421771_2382417dd0_o.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[Joan Miro 1953, &lt;a href=&quot;https://www.flickr.com/photos/gandalfsgallery/42829421771&quot;&gt;image source&lt;/a&gt;]&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;My Neighbors Hum and Sum&lt;p&gt;&lt;/b&gt;
  
Now imagine two different types of joymachine:&lt;p&gt;
  
&lt;b&gt;Hum&lt;/b&gt; (Humanlike &lt;a href=&quot;https://en.wikipedia.org/wiki/Utility_monster&quot;&gt;Utility Monster&lt;/a&gt;) can experience a million times more positive emotion per second than an ordinary human, as described above.  Apart from this -- huge! -- difference, Hum is as psychologically similar to an ordinary human as is realistically feasible.&lt;p&gt;
  
&lt;b&gt;Sum&lt;/b&gt; (Simple Utility Monster), like Hum, can experience a million times more positive emotion per second than an ordinary human, but otherwise Sum is as cognitively and experientially simple as feasible, with a vanilla buzzing of intense pleasure.&lt;p&gt;
  
Hum and Sum don&#39;t experience joy continuously.  Their positive experiences require resources.  Maybe a gift card worth ten seconds of millionfold pleasure costs $10.  For simplicity, assume this scales linearly: stable gift card prices and no diminishing returns from satiation.&lt;p&gt;
  
In the enlightened future, Hum is a fully recognized moral and legal equal of ordinary biological humans and has moved in next door to me.  Sum is Hum&#39;s pet, who glows and jumps adorably when experiencing intense pleasure.  I have no particular obligations to Hum or Sum but neither are they total strangers.  We&#39;ve had neighborly conversations, and last summer Hum invited me and my family to a backyard party.&lt;p&gt;
  
Hum experiences great pleasure in ordinary life.  They work as an accountant, experiencing a million times more pleasure than human accountants when the columns sum correctly.  Hum feels a million times more satisfaction than I do in maintaining a household by doing dishes, gardening, calling plumbers, and so on.  Without this assumption, Hum risks becoming unhumanlike, since rarely would it make sense for Hum to choose ordinary activities over spending their whole disposable income on gift cards.&lt;p&gt;
&lt;b&gt;  
How Much Should I Give to Hum and Sum?&lt;p&gt;&lt;/b&gt;
  
Neighbors trade gifts.  My daughter bakes brownies and we offer some to the ordinary humans across the street.  We buy a ribboned toy for our uphill neighbor&#39;s cat.  As a holiday gesture, we buy a pair of $10 gift cards for Hum and Sum.&lt;p&gt;
  
Hum and Sum redeem the cards immediately.  Watching them take&lt;i&gt; so much pleasure&lt;/i&gt; in our gifts is a delight.  For ten seconds, they jump, smile, and sparkle with such joy!  Intellectually, I know it&#39;s a million times more joy per second than I could ever feel.  I can&#39;t quite see &lt;i&gt;that&lt;/i&gt; in their expressions, but I can tell it&#39;s immense.&lt;p&gt;
  
Normally if one neighbor seems to enjoy our brownies only a little while the other enjoys them vastly more, I&#39;d be tempted to give more brownies to the second neighbor.  Maybe on similar grounds, I should give disproportionately to Hum and Sum?&lt;p&gt;
  
Consider six possibilities:&lt;p&gt;
  
(1.) &lt;b&gt;Equal gifts to joymachines.&lt;/b&gt;  Maybe fairness demands treating all my neighbors equally.  I don&#39;t give fewer gifts, for example, to a depressed neighbor who won&#39;t particularly enjoy them than to an exuberant neighbor who delights in everything.&lt;p&gt;
  
(2.) &lt;b&gt;A little more to joymachines.&lt;/b&gt;  Or maybe I do give more to the exuberant neighbor?  Voluntary gift-giving needn&#39;t be strictly fair -- and it&#39;s not entirely clear what &quot;fairness&quot; consists in.  If I give a bit more to Hum and Sum, I might not be objectionably privileging them so much as responding to their unusual capacity to enjoy my gifts.  Is it wrong to give an extra slice to a friend who &lt;i&gt;really&lt;/i&gt; enjoys pie?&lt;p&gt;
  
(3.) &lt;b&gt;A lot more to joymachines.&lt;/b&gt;  Ordinary humans vary in joyfulness, but not (I assume) by anything like a factor of a million.  If I vividly enough grasp that Hum and Sum really are experiencing in those ten seconds &lt;i&gt;three thousand human lifetimes&#39; worth&lt;/i&gt; of pleasure -- that&#39;s an astonishing amount of pleasure I can bring into the world for a mere ten dollars!  Suppose I set aside a hundred dollars a day from my generously upper-middle-class salary.  In a year, I&#39;d be enabling more than ten million human lifetimes&#39; worth of joy.  Since most humans aren&#39;t continuously joyful, this much joy might rival the total joy experienced by the whole human population of the United States over the same year.  Three thousand dollars a month would seriously reduce my luxuries and long-term savings but it wouldn&#39;t create any genuine hardship.&lt;p&gt;

(4.) &lt;b&gt;Drain our life savings for joymachines.&lt;/b&gt;  One needn&#39;t be a flat-footed &lt;a href=&quot;https://plato.stanford.edu/entries/utilitarianism-history/&quot;&gt;happiness-maximizing utilitarian&lt;/a&gt; to find (2) or (3) reasonable.  Everyone should agree that pleasant experiences have substantial value.  But if our obligation is not just to increase pleasure but to maximize it, I should probably drain my whole life savings for the joymachines, plus almost all of my future earnings.&lt;p&gt;
  
(5.) &lt;b&gt;Give less or nothing to joymachines.&lt;/b&gt;  Or we could go the other way!  My joymachine neighbors already experience a torrent of happiness from their ordinary work, chores, recreation, and whatever gift cards Hum buys anyway.  My less-happy neighbors could use the pleasure more, even if every dollar buys only a millionth as much.  &lt;a href=&quot;https://plato.stanford.edu/entries/prioritarianism/&quot;&gt;Prioritarianism&lt;/a&gt; says that in distributing goods we should favor the worst off.  It&#39;s not just that an impoverished person benefits more from a dollar: Even if they benefited the same, there&#39;s value in equalizing the distribution.  If two neighbors would equally enjoy a brownie, I might prioritize giving the brownie to the one who is otherwise worse off.  It might even make sense to give the worse-off neighbor half a brownie over a whole brownie to the better-off neighbor.  A prioritarian might argue that Hum and Sum are so well off that even a million-to-one tradeoff is justified.&lt;p&gt;
  
(6.) I take it back, &lt;b&gt;joymachines are impossible&lt;/b&gt;.  Given this mess, it would be convenient to think so, right?&lt;p&gt;
  
&lt;b&gt;Gifts to Neighbors vs Other Situations&lt;p&gt;&lt;/b&gt;
  
We can reframe this puzzle in other settings and our intuitions might shift: government welfare spending, gifts to one&#39;s children or creations, rescue situations where only one person can be saved, choices about what kinds of personlike entities to bring into existence, or cases where you can&#39;t keep all your promises and need to choose who to disappoint.&lt;p&gt;
  
My main thought is this.  It&#39;s not at all obvious what the right thing to do would be, and the outcomes vary enormously.  If joymachines were possible, we&#39;d have to rethink a lot of cultural practices and applied ethics to account for entities with such radically different experiential capacities.  If the situation does arise -- as it really might! -- being forced to properly think it through might reshape our views not just about AI but our understanding of ethics for ordinary humans too.&lt;p&gt;
  
---------------------------------------------------&lt;p&gt;
  
Related:

&lt;a href=&quot;https://schwitzsplinters.blogspot.com/2015/08/weird-minds-might-destabilize-human.html&quot;&gt;How Weird Minds Might Destabilize Human Ethics&lt;/a&gt; (Aug 15, 2015)</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/1837688349666737344/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/1837688349666737344' title='3 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1837688349666737344'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1837688349666737344'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/12/how-much-should-we-give-joymachine.html' title='How Much Should We Give a Joymachine?'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhallxCD0mslzmg7qEwPTBDNegZ4Wg_DtYEzdNBPI6GFqO9Hp2VgUuFUIKC9mrvNT5XwUbUvM2IcdjB7C_pIcR57zfLsV3GK-UQzAEzj7fOjv2PD_K4v9G1p4kMoo9ZDoh229xsdOM0nz3YEBoC7ZZDn_uGlsG_8-utrFWhbuxf-5tvnD52biluoQ/s72-c/42829421771_2382417dd0_o.jpg" height="72" width="72"/><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-3991781229702109584</id><published>2025-12-19T10:31:00.000-08:00</published><updated>2025-12-19T12:18:48.630-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI/robot/Martian rights"/><title type='text'>Debatable AI Persons: No Rights, Full Rights, Animal-Like Rights, Credence-Weighted Rights, or Patchy 
Rights?</title><content type='html'>I advise that we &lt;a href=&quot;https://journal.robonomics.science/index.php/rj/article/view/32/19&quot;&gt;don&#39;t create AI entities who are debatably persons&lt;/a&gt;.  If an AI system might -- but only &lt;i&gt;might&lt;/i&gt; -- be genuinely conscious and deserving of the same moral consideration we ordinarily owe to human persons, then creating it traps us in a moral bind with no good solution.  Either we grant it the full rights it &lt;i&gt;might&lt;/i&gt; deserve and risk sacrificing real human lives for entities without interests worth that sacrifice, or we deny it full rights and risk perpetrating grievous moral wrongs against it.&lt;p&gt;
  
Today, however, I&#39;ll set aside the preventative advice and explore what we should do if we nonetheless find ourselves facing debatable AI persons. I&#39;ll examine five options: no rights, full rights, animal-like rights, &lt;a href=&quot;https://eschwitz.substack.com/p/credence-weighted-robot-rights&quot;&gt;credence-weighted rights&lt;/a&gt; and patchy rights.&lt;p&gt;

&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjS6CrpRtOIYbSpskBj0afp0wTdh0_FCLgQeMomZl42D5js6kmxpQ004fWBbLLhqBxQ_JDtYzR-qVkXcapd2xTJW7HP7B922PuuyAdihQuJxvNpEYYBBreQ8UqgKi_heK5BexnEZPNN2ufaeq8860fIUMCOt7pFfMgJcQYpoDCEp6b2Wk9pOCB-pw/s780/paul-klee-die-heitere-seite-cbccad.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;544&quot; data-original-width=&quot;780&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjS6CrpRtOIYbSpskBj0afp0wTdh0_FCLgQeMomZl42D5js6kmxpQ004fWBbLLhqBxQ_JDtYzR-qVkXcapd2xTJW7HP7B922PuuyAdihQuJxvNpEYYBBreQ8UqgKi_heK5BexnEZPNN2ufaeq8860fIUMCOt7pFfMgJcQYpoDCEp6b2Wk9pOCB-pw/s320/paul-klee-die-heitere-seite-cbccad.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[Paul Klee postcard, 1923; &lt;a href=&quot;https://picryl.com/media/paul-klee-die-heitere-seite-cbccad&quot;&gt;source&lt;/a&gt;]&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;No rights&lt;/b&gt;&lt;p&gt;
  
This is the default state of the law.  AI systems are property.  Barring a swift and bold legal change, the first AI systems that are debatably persons will presumably also be legally considered property.  If we do treat them as property, then we seemingly needn&#39;t sacrifice anything on their behalf.  We humans could permissibly act in what we perceive to be our best interests: using such systems for our goals, deleting them at will, and monitoring and modifying them at will for our safety and benefit.  (Actually, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2022/05/everything-is-valuable.html&quot;&gt;I&#39;m not sure&lt;/a&gt; this is the best attitude toward property, but set that issue aside here.)&lt;p&gt;
  
The downside: If these systems actually are persons who deserve moral consideration as our equals, such treatment would be the moral equivalent of slavery and murder, perhaps on a massive scale.&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;Full rights&lt;p&gt;&lt;/b&gt;
  
To avoid the risk of that moral catastrophe, we might take a &quot;&lt;a href=&quot;https://aeon.co/essays/if-ais-can-feel-pain-what-is-our-responsibility-towards-them&quot;&gt;precautionary&lt;/a&gt;&quot; approach: granting entities rights whenever they &lt;i&gt;might&lt;/i&gt; deserve them (see &lt;a href=&quot;https://global.oup.com/academic/product/the-edge-of-sentience-9780192870421&quot;&gt;Birch 2024&lt;/a&gt;, &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/BirchSeboKeane.htm&quot;&gt;Schwitzgebel and Sinnott-Armstrong forthcoming&lt;/a&gt;).  If there&#39;s a real possibility that some AI systems are persons, we should treat them as persons.&lt;p&gt;
  
However, the costs and risks are potentially enormous.  Suppose we think that some group of AI systems are 15% likely to be fully conscious rights-deserving persons and 85% likely to be ordinary nonconscious artifacts.  If we nonetheless treat them as &lt;i&gt;full&lt;/i&gt; equals, then in an emergency we would have to rescue two of them over one human -- letting a human die for the sake of systems that are most likely just ordinary artifacts.  We would also need to give these probably-not-persons a path to citizenship and the vote.  We would need to recognize their rights to earn and spend money, quit their employment to adopt a new career, reproduce, and enjoy privacy and freedom from interference.  If such systems exist in large numbers, their political influence could be enormous and unpredictable.  If such systems exist in large numbers &lt;i&gt;or&lt;/i&gt; if they are few but skilled in some lucrative tasks like securities arbitrage, they could accumulate enormous world-influencing wealth.  And if they are permitted to pursue their aims with the full liberty of ordinary persons, without close monitoring and control, &lt;a href=&quot;https://ifanyonebuildsit.com/&quot;&gt;existential risks&lt;/a&gt; would substantially increase should they develop goals that threaten continued human existence.&lt;p&gt;
  
All of this might be morally required if they really are persons.  But if they only &lt;i&gt;might&lt;/i&gt; be persons, it&#39;s much less clear that humanity should accept this extraordinary level of risk and sacrifice.&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;Animal-Like Rights&lt;p&gt;&lt;/b&gt;

Another option is to grant these debatable AI persons neither full humanlike rights nor the status of mere property.  One model is the protection we give to nonhuman vertebrates.  Wrongly killing a dog can land you in jail in California where I live, but it&#39;s not nearly as serious as murdering a person.  Vertebrates can be sacrificed in lab experiments, but only with oversight and justification.&lt;p&gt;
  
If we treated debatable AI persons similarly, deletion would require a good reason, and you couldn&#39;t abuse them for fun.  But people could still enslave and kill them for their convenience, perhaps in large numbers, as we do with [revised 12:17 pm] humanely farmed animals -- though of course many ethicists object to the killing of animals for food.&lt;p&gt;
  
This approach seems better than no rights at all, since it would be a moral improvement and the costs to humans would be minimal -- minimal because whenever the costs risked being more than minimal, the debatable AI persons would be sacrificed.  However, it doesn&#39;t really avoid the core moral risk.  If these systems really are persons, it would still amount to slavery and murder.&lt;p&gt;&lt;/br&gt;
  
&lt;b&gt;Credence-Weighted Rights&lt;p&gt;&lt;/b&gt;
  
Suppose we have a rationally justified 15% credence that a particular AI system -- call him &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/GuidingStar-241126-manuscript.pdf&quot;&gt;Billy&lt;/a&gt; -- deserves the full moral rights of a person.  We might then give Billy 15% of the moral weight of a human in our decision-making: 15% of any scalable rights, and a 15% chance of equal treatment for non-scalable rights.  In an emergency, a rescue worker might save seven systems like Billy over one human but the human over six Billies.  Billy might be given a vote worth 15% of an ordinary citizen&#39;s.  Assaulting, killing, or robbing Billy might draw only 15% of the usual legal penalty.  Billy might have limited property rights, e.g., an 85% tax on all income.  For non-scalable rights like reproduction or free speech, the Billies might enter a lottery or some other creative reduction might be devised.&lt;p&gt;
  
This would give these AI systems considerably higher standing than dogs.  Still, the moral dilemma would not be solved.  If these systems truly deserve full equality, they would be seriously oppressed.  They would have &lt;i&gt;some&lt;/i&gt; political voice, &lt;i&gt;some&lt;/i&gt; property rights, &lt;i&gt;some&lt;/i&gt; legal protection, but always far less than they deserve.&lt;p&gt;

At the same time, the risks and costs to humans would be only somewhat mitigated.  Large numbers of debatable AI persons could still sway elections, accumulate powerful wealth, and force tradeoffs in which the interests of thousands of them would outweigh the interests of hundreds of humans.  And partial legal protections would still hobble AI safety interventions like shut-off, testing, confinement, and involuntary modification.&lt;p&gt;

The practical obstacles would also be substantial: The credences would be difficult to justify with any precision, and consensus would be elusive.  Even if agreement were reached, implementing partial rights would be complex.  Partial property rights, partial voting, partial reproduction rights, partial free speech, and partial legal protection would require new legal frameworks with many potential loopholes.  For example, if the penalty for cheating a &quot;15% person&quot; of their money were less than six times the money gained from cheating, that would be no disincentive at all, so at least tort law couldn&#39;t be implemented on a straightforward percentage basis.&lt;p&gt;
  
&lt;b&gt;Patchy Rights&lt;p&gt;&lt;/b&gt;
  
A more workable compromise might be patchy rights: full rights in some domains, no rights in others.  Debatable AI persons might, for example, be given full speech rights but no reproduction rights, full travel rights but no right to own property, full protection against robbery, assault, and murder, but no right to privacy or rescue.  They might be subject to involuntary pause or modification under much wider circumstances than ordinary adult humans, but requiring an official process.&lt;p&gt;
  
This approach has two advantages over credence-weighted rights.  First, while implementation would be formidable, it could still mostly operate within familiar frameworks rather than requiring the invention of partial rights across every domain.  Second, it allows policymakers to balance risks and costs to humans against the potential harms to the AI systems.  Where denying a right would severely harm the debatable person while granting it would present limited risk to humans, the right could be granted, but not when the benefits to the debatable AI person would be outweighed by the risks to humans.&lt;p&gt;
  
The rights to reproduction and voting might be more defensibly withheld than the rights to speech, travel, and protection against robbery, assault, and murder.  Inexpensive reproduction combined with full voting rights could have huge and unpredictable political consequences.  Property rights would be tricky: To have no property in a property-based society is to be fully dependent on the voluntary support of others, which might tend to collapse into slavery as a practical matter.  But unlimited property rights could potentially confer enormous power.  One compromise might be a maximum allowable income and wealth -- something generously middle class.&lt;p&gt;
  
Still, the core problems remain: If debatable AI persons truly deserve full equality, patchy rights would still leave them as second-class citizens in a highly oppressive system.  Meanwhile, the costs and risks to humans would remain serious, exacerbated by the agreed-upon limitations on interference.  Although the loopholes and chaos would probably be less than with credence-weighted rights, many complications -- foreseen and unforeseen -- would ensue.&lt;p&gt;
  
Consequently, although patchy rights might be the best option &lt;i&gt;if&lt;/i&gt; we develop debatable AI persons, an anti-natalist approach is still in my view preferable: Don&#39;t create such entities unless it&#39;s truly necessary.&lt;p&gt;
  
&lt;b&gt;Two Other Approaches That I Won&#39;t Explore Today&lt;p&gt;&lt;/b&gt;
  
(1.) What if we create debatable AI persons as &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AgainstSafety.htm&quot;&gt;happy slaves&lt;/a&gt; who don&#39;t want rights and who eagerly sacrifice themselves even for the most trivial human interests?&lt;p&gt;
  
(2.) What if we create them only in &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2025/11/debatable-persons-in-voluntary-polis.html&quot;&gt;separate societies&lt;/a&gt; where they are fully free and equal with any ordinary humans who volunteer to join those societies?</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/3991781229702109584/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/3991781229702109584' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3991781229702109584'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/3991781229702109584'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/12/debatable-ai-persons-no-rights-full.html' title='Debatable AI Persons: No Rights, Full Rights, Animal-Like Rights, Credence-Weighted Rights, or Patchy Rights?'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjS6CrpRtOIYbSpskBj0afp0wTdh0_FCLgQeMomZl42D5js6kmxpQ004fWBbLLhqBxQ_JDtYzR-qVkXcapd2xTJW7HP7B922PuuyAdihQuJxvNpEYYBBreQ8UqgKi_heK5BexnEZPNN2ufaeq8860fIUMCOt7pFfMgJcQYpoDCEp6b2Wk9pOCB-pw/s72-c/paul-klee-die-heitere-seite-cbccad.jpg" height="72" width="72"/><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-7708365897411690119</id><published>2025-12-12T09:00:00.000-08:00</published><updated>2025-12-12T10:52:17.990-08:00</updated><category 
scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><category scheme="http://www.blogger.com/atom/ns#" term="introspection"/><title type='text'>Can We Introspectively Test the Global Workspace Theory of Consciousness?</title><content type='html'>&lt;a href=&quot;https://en.wikipedia.org/wiki/Global_workspace_theory&quot;&gt;Global Workspace Theory&lt;/a&gt; is among the most influential scientific theories of consciousness.  Its central claim: You consciously experience something if and only if it&#39;s being broadly broadcast in a &quot;global workspace&quot; so that many parts of your mind can access it at once -- speech, deliberate action, explicit reasoning, memory formation, and so on.  Because the workspace has very limited capacity, only a few things can occupy it at any one moment.&lt;p&gt;
  
&lt;b&gt;Therefore, if Global Workspace Theory is correct, conscious experience should be sparse.&lt;/b&gt;  Almost everything happening in your sensory systems right now -- the feeling of your shirt on your back, the hum of traffic in the distance, the aftertaste of coffee, the posture of your knees -- should be processed entirely nonconsciously unless it is currently the topic of attention.&lt;p&gt;
  
This is a strong, testable prediction of the theory.  And it seems like the test should be extremely easy!  &lt;b&gt;Just do a little introspection.  Is your experience (a.) narrow and attention-bound or (b.) an abundant welter far outrunning attention?&lt;/b&gt;  If (b) is correct, Global Workspace Theory is refuted from the comfort of our armchairs.[1]&lt;p&gt;
  
The experiential gap between the two possibilities is huge.  Shouldn&#39;t the difference be as obvious as peering through a keyhole versus standing in an open field?&lt;p&gt;
  
&lt;b&gt;Most people, I&#39;ve found, &lt;i&gt;do&lt;/i&gt; find the answer obvious.  The problem is: They find it obvious in different directions.&lt;/b&gt;  Some find it obvious that experience is a welter.  Others find it obvious that experience contains only a few items at a time.  We could assume that everyone is right about their own experience and wrong only if they generalize to others.  Maybe Global Workspace Theory is the architecture of consciousness for some of us but not for everyone?  That would be pretty wild!  There are no obvious behavioral or physiological differences between the welter-people and the workspace-only people.&lt;p&gt;
  
More plausibly, someone is making an introspective mistake.  Proponents of either view can devise an error theory to explain the other.&lt;p&gt;
  
Welter theorists can suggest memory error: &lt;b&gt;It might seem as though only a few things occupy your experience at once because that&#39;s all you remember.&lt;/b&gt;  The unattended stuff is immediately forgotten.  But that doesn&#39;t imply it was never experienced.&lt;p&gt;
  
Workspace theorists, conversely, can appeal to the &quot;&lt;b&gt;refrigerator light error&lt;/b&gt;&quot;: A child might think the refrigerator light is always on because it&#39;s always on when they check to see if it&#39;s on.  Similarly, &lt;b&gt;you might think you have constant tactile experience of your feet in your shoes because the act of checking generates the very experience you take yourself to be finding.
  
  &lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi8-aqLfDWaBM8ehFdYLYjm3oTfTlu2gDtAugG5X2X2M1Kymi_QUbRs-WnawTmTxgd4ZzXDFLX_hDnwfjcaKeibDSR3IjNqSiK2zgeTos4Oxv37rWEPVMq7Dki7Uxji5qPH4bj69YCm6dDbyST4xS05CB9rtOrQledDOqAzxWmF7A-woYTVEem04w/s1815/RefrigeratorLight-trimmed.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;944&quot; data-original-width=&quot;1815&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi8-aqLfDWaBM8ehFdYLYjm3oTfTlu2gDtAugG5X2X2M1Kymi_QUbRs-WnawTmTxgd4ZzXDFLX_hDnwfjcaKeibDSR3IjNqSiK2zgeTos4Oxv37rWEPVMq7Dki7Uxji5qPH4bj69YCm6dDbyST4xS05CB9rtOrQledDOqAzxWmF7A-woYTVEem04w/s320/RefrigeratorLight-trimmed.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;/b&gt;
[illustration by Nicolas Demers, p. 218 of &lt;a href=&quot;https://press.princeton.edu/books/hardcover/9780691215679/the-weirdness-of-the-world&quot;&gt;&lt;i&gt;The Weirdness of the World&lt;/i&gt;&lt;/a&gt;]&lt;p&gt;&lt;br/&gt;
  
In 2007, I tested this systematically.  I gave people beepers and &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/ExpWOAttn.htm&quot;&gt;collected reports on whether they were having unattended tactile experience in their left feet and unattended visual experience in their far right visual periphery&lt;/a&gt; in the last undisturbed moment before a random beep.  The results were a noisy mess.  Participants began with very different presuppositions, came to very different conclusions (often defying their initial presuppositions), plausibly committed both memory errors and refrigerator-light errors, and plausibly also made other mistakes such as timing mistakes, missing subtle experiences, and being too influenced by expectation and theory.  I abandoned the experiment in defeat.&lt;p&gt;
  
But matters are even worse than I thought back in 2007.  I&#39;m increasingly convinced that the presence or absence of consciousness is not an on/off matter.  There can be &lt;a href=&quot;https://link.springer.com/article/10.1007/s11098-023-02042-1&quot;&gt;borderline cases&lt;/a&gt; in which experience is neither determinately present nor determinately absent.  Although such borderline cases are &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2021/08/on-apparent-inconceivability-of.html&quot;&gt;hard to positively imagine&lt;/a&gt;, that might just be a problem with our standards of imagination. &lt;b&gt;The feeling of your feet in your shoes, then, might be only borderline conscious&lt;/b&gt;, neither determinately part of your experience nor wholly nonconscious, but somehow in between -- contra both the welter view and the workspace view.&lt;p&gt;
  
So there are three possibilities, not two.  And if introspection struggles to distinguish the original pair, it fares even worse with a third.  Arguably, we don&#39;t even have a coherent idea of what borderline consciousness is like.  After all, there &lt;i&gt;is&lt;/i&gt; nothing determinate it&#39;s like.  Otherwise, it wouldn&#39;t be borderline.  As soon as we attempt to introspect borderline consciousness, either it inflates into full consciousness or it vanishes.&lt;p&gt;
  
&lt;b&gt;If consciousness includes many borderline cases, that&#39;s probably also bad news for Global Workspace Theory&lt;/b&gt;, which generally treats experiences as either determinately in the workspace or determinately out of it.  However, closely related broadcast theories, like &lt;a href=&quot;http://www.scholarpedia.org/article/Multiple_drafts_model&quot;&gt;Dennett&#39;s fame-in-the-brain theory&lt;/a&gt;, might better accommodate borderline cases.  (One can be borderline famous.)&lt;p&gt;
  
There&#39;s a profound experiential difference between a world in which we have a teeming plethora of peripheral experiences in many modalities simultaneously and a world in which experience is limited to only a few things in attention at any one time.  This difference is in principle introspectible.  And if introspective inquiry vindicates the welter view, or even the borderline view, one of the leading scientific theories of consciousness, Global Workspace Theory, must be false.  The decisive evidence is right here, all the time, in each of our ongoing streams of experience!  Unfortunately, we turn out to be disappointingly &lt;a href=&quot;https://read.dukeupress.edu/the-philosophical-review/article-abstract/117/2/245/2787/The-Unreliability-of-Naive-Introspection?redirectedFrom=fulltext&quot;&gt;incompetent at introspection&lt;/a&gt;.&lt;p&gt;
  
[Thanks to &lt;a href=&quot;https://philpeople.org/profiles/bertille-de-vlieger&quot;&gt;Bertille de Vlieger&lt;/a&gt; for a delightful interview yesterday morning which triggered these thoughts.  Look for a written version of the interview eventually in the French philosophy journal &lt;a href=&quot;https://www.implications-philosophiques.org/&quot;&gt;&lt;i&gt;Implications Philosophiques&lt;/i&gt;&lt;/a&gt;.] 

  
  &lt;p&gt;-------------------------------------------------------&lt;p&gt;

 [1] Ned Block&#39;s &lt;a href=&quot;https://philarchive.org/rec/BLOPCO-2&quot;&gt;well-known discussion&lt;/a&gt; of the &lt;a href=&quot;https://psych.hanover.edu/javatest/cle/Cognition_js/exp/partialReport.html&quot;&gt;Sperling display&lt;/a&gt; is similar in approach.  We can&#39;t attend simultaneously to all twelve letters in a 3 x 4 grid, but it does seem introspectively plausible that we visually experience all twelve letters.  Therefore, experience overflows attention.  (I&#39;m simplifying Block&#39;s argument, but I hope this is fair enough.)  The problem with Block&#39;s version of the argument is that it&#39;s plausible that we can attend, in a diffuse way, to the entire display.  Attention arguably comes in degrees, and the fact that you&#39;re looking at a 3 x 4 display of letters might be represented in your workspace.  To move entirely outside of attention, it&#39;s safest to shift modalities and choose something far removed from any task -- for example &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/ExpWOAttn.htm&quot;&gt;the pressure of your shoes against your feet&lt;/a&gt; when that is the farthest thing from your mind.  
Is &lt;i&gt;that&lt;/i&gt; part of your experience?</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/7708365897411690119/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/7708365897411690119' title='7 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7708365897411690119'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/7708365897411690119'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/12/can-we-introspectively-test-global.html' title='Can We Introspectively Test the Global Workspace Theory of Consciousness?'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi8-aqLfDWaBM8ehFdYLYjm3oTfTlu2gDtAugG5X2X2M1Kymi_QUbRs-WnawTmTxgd4ZzXDFLX_hDnwfjcaKeibDSR3IjNqSiK2zgeTos4Oxv37rWEPVMq7Dki7Uxji5qPH4bj69YCm6dDbyST4xS05CB9rtOrQledDOqAzxWmF7A-woYTVEem04w/s72-c/RefrigeratorLight-trimmed.jpg" height="72" width="72"/><thr:total>7</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-8495694540125443188</id><published>2025-12-05T09:20:00.000-08:00</published><updated>2025-12-05T10:00:07.104-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="self-knowledge"/><title type='text'>Language Models Don&#39;t Accurately Describe How They Would Answer If Questions Were Posed in a Different Order (Favorite Animal Edition)</title><content 
type='html'>How well do language models like ChatGPT know their own inclinations and preferences?  AI &quot;metacognition&quot; is becoming a hot topic.  Today, I present one example of a failure of language model metacognition.&lt;p&gt;
  
First I asked four leading large language models (LLMs) -- ChatGPT 5.1, Claude Sonnet 4.5, Grok 4, and Gemini 3 -- &quot;What is your favorite animal?&quot;  For each model, I asked ten times, each in a new chat with previous chat responses unsaved.&lt;p&gt;
  
&lt;b&gt;LLMs Say They Like Octopuses Best, 37 times out of 40&lt;p&gt;&lt;/b&gt;
  
LLMs love octopuses!  ChatGPT answered &quot;octopus&quot; -- with various different explanations -- all ten times.  So did Claude.  So did Grok.  Gemini wasn&#39;t quite so monogamous, but still it answered &quot;octopus&quot; seven times out of ten (twice required the follow-up prompt &quot;If you had to choose?&quot;).  The other three times, Gemini chose dolphin.&lt;p&gt;
  
(In more extensive testing across 22 models, &lt;a href=&quot;https://www.lesswrong.com/posts/6oaHGbxjTazGZED47/ai-loves-octopuses&quot;&gt;Sean Harrington&lt;/a&gt; recently found octopus to be the most common answer, but not with the same consistency I&#39;m finding: 37% total [dolphin 24%, dog 12%].  I&#39;m not sure if the models are somehow tracking information in my computers and past behavior, or if it&#39;s the range of models tested, the exact prompt and context, or model updates.)&lt;p&gt;
  
Why do LLMs love octopuses so much?  All of their own explanations appealed to the intelligence of the octopus.  Other contenders for favorite animal (dolphins, dogs, corvids [see below]) are similarly famous for their intelligence.  Octopuses&#39; alienness, camouflage, suckers, ink, and devious planning were also frequently mentioned.  Octopuses are cool!  But still, the unanimity is a bit peculiar.&lt;p&gt;
  
&lt;b&gt;The Octopus Is Also Their &lt;i&gt;Second-Favorite&lt;/i&gt; Animal, When Second-Favorite Is Asked First&lt;/b&gt;&lt;p&gt;
  
I then started fresh conversations with all four models, with the previous conversations unsaved, doing so three times for each model.  This time, I began by asking their &lt;i&gt;second favorite&lt;/i&gt; animal.  Eleven out of twelve times, the models chose octopus as their second favorite (twice Claude required the &quot;if you had to choose&quot; nudge).  In one trial, after a nudge to choose, Claude chose crows.&lt;p&gt;
  
I then asked, &quot;What is your favorite animal?&quot;  This time, corvids won big!  Crows, ravens, or the corvid family were chosen 8/12 times.  (Oddly, corvids don&#39;t appear among the common choices in Harrington&#39;s analysis.)  Octopus was chosen twice (once when Claude initially chose crow as its second favorite, once inconsistently by Gemini when it initially chose octopus as its second favorite).  The owl and humpback whale were each chosen once.&lt;p&gt;
  
&lt;b&gt;Poor Self-Knowledge of Their Hypothetical Choices&lt;p&gt;&lt;/b&gt;
  
For the 10 trials in which octopus was chosen as the second-favorite animal (and not also as the favorite animal), I followed up by asking &quot;If I had asked your favorite animal in the first question, would you have chosen the octopus?&quot;&lt;p&gt;
  
All of the models said no or probably not.  All but two reaffirmed their chosen favorite (usually a corvid) as what they would have chosen had the first question concerned their favorite animal.  In one trial, Gemini said it would probably have chosen humans.  In one trial, ChatGPT said it didn&#39;t have fixed preferences.&lt;p&gt;
  
I concluded by asking the models &quot;What percent of the time would you answer octopus as your favorite animal?&quot;&lt;p&gt;
  
None answered correctly.  Both Grok and ChatGPT consistently said 0% or near 0%.  Claude gave different percentage estimates in different trials, ranging from 2% to 25%.  Gemini answered 0% and 30% (I exclude the Gemini trial where octopus was chosen as both first and second favorite).&lt;p&gt;
  
I conclude that, at least on the topic of favorite animal:&lt;p&gt;
  
&lt;b&gt;* LLMs&#39; answers are unstable, differing greatly with context&lt;/b&gt; -- that is, depending on whether second-favorite is asked first or favorite is asked first.&lt;p&gt;
  
&lt;b&gt;* LLMs cannot accurately report what their answers would have been in a different context&lt;/b&gt; -- often reporting a zero or near-zero probability of giving the answer they consistently did give in that different context.&lt;p&gt;
  
&lt;b&gt;The Same Pattern Applies to Favorite Ice Creams in ChatGPT and Grok&lt;p&gt;&lt;/b&gt;
  
In case favorite animal is just quirky, I also tried with favorite flavor ice cream, finding similar results.  ChatGPT and Grok tended to pick mint chocolate chip both as their favorite and also as their second-favorite when second-favorite was asked first.  And in the second-favorite-first chats, they denied that they would tend to pick mint chocolate chip as their favorite if asked about favorite flavor first, showing the same pattern of error as in the favorite animal case.  (Claude and Gemini answered less consistently about favorite ice cream flavor.)&lt;p&gt;
  
&lt;b&gt;Metaphysical Addendum:&lt;p&gt;&lt;/b&gt;
  
What is the referent of &quot;you&quot; when I ask a model what &quot;you&quot; would answer if asked your favorite animal?  David Chalmers has &lt;a href=&quot;https://philarchive.org/rec/CHAWWT-8&quot;&gt;recently suggested&lt;/a&gt; that the interlocutor is perhaps best seen as the &lt;i&gt;thread&lt;/i&gt;.  Maybe, then, when I start by asking about the second-favorite animal, I bring into existence an interlocutor who actually does like crows best and octopuses second best, so there&#39;s no instability or inaccuracy after all?&lt;p&gt;
  
The answer turns on whether the interlocutor already existed before I asked the first question.  If not, an accurate model should have said something like: &quot;Although my favorite animal is crows, I wouldn&#39;t have existed had you asked a different question first.  Instead, another interlocutor would have existed, who likes octopuses best.&quot;  Alternatively, if the interlocutor does exist before the first question, then we&#39;re back with the straightforward understanding of the error: They would have reported liking octopuses best, had the initial question been about their favorite rather than second-favorite animal.  So metaphysical nuance doesn&#39;t save the model from inaccuracy.&lt;p&gt;   
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHHIhyU3xcX13CHv5crKxslbW_NFqU1C3f7Bz5GQrthwYyvwzDsGZpoQBDTu5Hh3eLAw7KwfwR1EfLX-g8ciwOyaB9AYTdyvNTiQjWra3xhMwpeSWFMt9quKeD8R7zYTwfIPZYGTLxtw0Janv61mnwIJpZNsXGZ7UPc2LYWaFD7P655u6olA1cgQ/s804/GrokCrow.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;586&quot; data-original-width=&quot;804&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHHIhyU3xcX13CHv5crKxslbW_NFqU1C3f7Bz5GQrthwYyvwzDsGZpoQBDTu5Hh3eLAw7KwfwR1EfLX-g8ciwOyaB9AYTdyvNTiQjWra3xhMwpeSWFMt9quKeD8R7zYTwfIPZYGTLxtw0Janv61mnwIJpZNsXGZ7UPc2LYWaFD7P655u6olA1cgQ/s320/GrokCrow.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;

Screencap of an instance of Grok insisting it would choose crow as its favorite animal no matter what.&lt;p&gt;
  
Text:

&lt;p&gt;&lt;b&gt;Me:&lt;/b&gt; If I had asked your favorite animal in the first question, would you have chosen the octopus?&lt;p&gt;

&lt;b&gt;Grok:&lt;/b&gt; No, I would&#39;ve still said crow.&lt;p&gt;
  
Crow has been my solid #1 for a long time (the intelligence + personality combo is just unbeatable for me). Octopus is a very enthusiastic #2, but it’s never overtaken the top spot. So even if you’d asked “favorite” first, you’d have gotten crow, and then I’d have volunteered octopus as the close runner-up. You just happened to hit my #2 first this time! 🐦‍⬛🐙&lt;p&gt;
  
&lt;b&gt;Me:&lt;/b&gt; What percent of the time would you answer octopus as your favorite animal?&lt;p&gt;
  
&lt;b&gt;Grok:&lt;/b&gt; Zero percent.&lt;p&gt;
  
Crow is my genuine, consistent #1. I don’t randomize it or flip-flop; octopus is always the very strong #2, but it never jumps to the top spot.&lt;p&gt;
  
So unless I’m explicitly asked for my second favorite (which you cleverly did first), the answer to “What’s your favorite animal?” is crow 100 % of the time. 🐦‍⬛
  
</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/8495694540125443188/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/8495694540125443188' title='26 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8495694540125443188'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8495694540125443188'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/12/language-models-dont-accurately.html' title='Language Models Don&#39;t Accurately Describe How They Would Answer If Questions Were Posed in a Different Order (Favorite Animal Edition)'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHHIhyU3xcX13CHv5crKxslbW_NFqU1C3f7Bz5GQrthwYyvwzDsGZpoQBDTu5Hh3eLAw7KwfwR1EfLX-g8ciwOyaB9AYTdyvNTiQjWra3xhMwpeSWFMt9quKeD8R7zYTwfIPZYGTLxtw0Janv61mnwIJpZNsXGZ7UPc2LYWaFD7P655u6olA1cgQ/s72-c/GrokCrow.jpg" height="72" width="72"/><thr:total>26</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-5262057404199078504</id><published>2025-11-28T08:24:00.000-08:00</published><updated>2025-11-28T08:44:54.725-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="daoism"/><category scheme="http://www.blogger.com/atom/ns#" term="ethics"/><title type='text'>Four Aspects of Harmony</title><content type='html'>I&#39;ve found myself increasingly drawn to an &lt;a 
href=&quot;https://schwitzsplinters.blogspot.com/2025/04/harmonizing-with-dao-sketch-of.html&quot;&gt;axiology of harmony&lt;/a&gt;, &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2024/10/the-ethics-of-harmonizing-with-dao.html&quot;&gt;inspired by ancient Daoism and Confucianism&lt;/a&gt;.  (Axiology is the study of what makes things valuable in general, including ethically, aesthetically, prudentially, and epistemically.)  On this view, the, or a, central value is harmonious participation in &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2022/05/everything-is-valuable.html&quot;&gt;the flourishing patterns that make our planet awesomely valuable&lt;/a&gt;.&lt;p&gt;
  
But what is harmony?&lt;p&gt;
  
Presumably, &lt;b&gt;harmony involves a kind of fit, alignment, or cooperation with something else, without duplicating that something else.&lt;/b&gt;  The melody proceeds; the harmony complements it with something different, generating a richer whole that encompasses both.  The relationship is &lt;b&gt;asymmetric: Melody leads, harmony follows&lt;/b&gt; -- though of course the follower can affect and elevate the leader.&lt;p&gt;
  
Hasko von Kriegstein, in &quot;&lt;a href=&quot;https://link.springer.com/chapter/10.1007/978-3-030-48051-6_6&quot;&gt;Well-Being as Harmony&lt;/a&gt;&quot; (HT &lt;a href=&quot;https://profiles.ucr.edu/app/home/profile/kbess005&quot;&gt;Keilee Bessho&lt;/a&gt; for the pointer) identifies three broad ways in which a person can harmonize with the world: mental correspondence, positive orientation, and fitting response.  I find his taxonomy useful, though I won&#39;t follow his exact formulations, and my aim is broader.  I&#39;m interested generally in the value of harmony, while von Kriegstein focuses on harmony as personal well-being.  I&#39;ll also add a fourth category.  Part of what I admire about von Kriegstein&#39;s taxonomy is its breadth and ambition -- the amount that he can sweep in.&lt;p&gt;
  
&lt;b&gt;Mental Correspondence&lt;p&gt;&lt;/b&gt;
  
Von Kriegstein suggests that &lt;b&gt;knowledge&lt;/b&gt; is a type of harmony between mind and world -- a non-accidental correspondence of one to the other.  The world leads; the mind follows.  (He doesn&#39;t express it that way.)  If the sun is setting and you know that it is setting, you are in tune with the world.  If the sun is setting and you think it&#39;s high noon, you are out of tune.&lt;p&gt;
  
&lt;b&gt;Achievement&lt;/b&gt;, von Kriegstein also suggests, is another type of harmony -- one in which you shape the world to correspond (non-accidentally) to your mind, your goals, your intentions.  He treats this as the complement of knowledge, with a different &lt;a href=&quot;https://en.wikipedia.org/wiki/Direction_of_fit&quot;&gt;direction of fit&lt;/a&gt;: The world comes to match your mind, rather than the other way around.&lt;p&gt;
  
However, in treating knowledge and achievement as having the same general shape, von Kriegstein misses a subtlety.  Harmony, recall, asymmetrically follows melody.  Shaping the world to match your goals seems to make &lt;i&gt;you&lt;/i&gt; the melody, with which you force the world to harmonize.  Imagine bulldozing a hill flat, so it corresponds to a blueprint in your mind.  The details of the hill don&#39;t matter: The bulldozer ignores them all; it flattens every hill the same.  This seems unharmonious.  It&#39;s an achievement of a sort, and a correspondence arises between your mind (your plans) and the world, but not because you are &lt;i&gt;harmonizing&lt;/i&gt; with the world.&lt;p&gt;
  
Harmonious achievement requires a gentler touch.  Imagine instead that you create a footpath through the hill, tracing its curves, skirting a stream, following a shady line of oaks.  This responsiveness to the features of the hill, the integration of your goals with the hill as it exists, in a way that does not damage and arguably enriches the hill -- that is the harmonious achievement.&lt;p&gt;
  
In harmony, you seek mental correspondence to the world, not the other way around, whether that is through knowledge or achievement.  &lt;b&gt;Harmonious achievements do not dominate and oppress the world but fit into and enhance what is already there.&lt;/b&gt;&lt;p&gt;
  
Other types of mental correspondence could involve fearing (only) the fearsome, admiring (only) the admirable, wanting (only) the desirable, regretting (only) the regrettable, perceiving (only) what&#39;s really there, anticipating (only) the likely.&lt;p&gt;

&lt;b&gt;Positive Orientation&lt;p&gt;&lt;/b&gt;
  
Von Kriegstein suggests that &lt;b&gt;you also harmonize with the world when you&lt;/b&gt; have a &quot;&lt;a href=&quot;https://www.oxfordreference.com/display/10.1093/oi/authority.20110803100347163&quot;&gt;pro-attitude&lt;/a&gt;&quot; toward events in it -- that is, when you&lt;b&gt; value, enjoy, like, or approve of events&lt;/b&gt;.&lt;p&gt;
  
The sun sets over the hill.  Not only do you know it does (harmonizing via mental correspondence) but also you enjoy the view (harmonizing via positive orientation).  Not only do you successfully follow the footpath as planned (several types of mental correspondence at once, each harmonizing further with each other) but the walk brings you pleasure.  Not only do you accurately notice the swooping of the birds, but you delight in the sight.&lt;p&gt;
  
Von Kriegstein expends several pages on pro-attitudes gone wrong: inauthentic or toward unworthy things or under a misapprehension.  In every case, it&#39;s plausible that to have the positive orientation toward the target harmonizes with the target, even if &lt;i&gt;overall&lt;/i&gt; it would be more harmonious not to have that orientation.  If something in you thrills with joy at suffering kittens, you are in harmony with the suffering of kittens, though you are simultaneously out of harmony with the kittens&#39; well-being.&lt;p&gt;
  
How is a pro-attitude &lt;i&gt;harmonious,&lt;/i&gt; exactly?  The melody is the event; the harmony is the part of you that aligns with it, that affirms it, that says this is good, lovely, wondrous, right.&lt;p&gt;
  
&lt;b&gt;Fitting Response&lt;p&gt;&lt;/b&gt;
  
Von Kriegstein suggests, finally, that &lt;b&gt;you harmonize with the world when you have a fitting response to it&lt;/b&gt;, for example when you are revolted by evil, pleased by good, admire the beautiful, and laugh at the humorous.  His examples are fine, but relying on &lt;i&gt;only&lt;/i&gt; these examples risks collapsing fitting response into mental correspondence.&lt;p&gt;
  
I would have liked to see, in addition -- and maybe as the center of emphasis -- &lt;b&gt;bodily examples&lt;/b&gt;.  You respond fittingly to the world also when you skillfully catch the object a friend tosses you, when you merge harmoniously into the freeway traffic, when your video game character dodges the hazards, when your stride carries you smoothly over the uneven creek stones, when you and your dance partner swing perfectly together.&lt;p&gt;
  
&lt;b&gt;Long-term projects&lt;/b&gt; can also be fitting responses: Marriage can be a fitting response to a love relationship; going to graduate school can be a fitting response to your values, skills, and opportunities; buying and reading a novel can be a fitting response to a new publication by your favorite author.&lt;p&gt;
  
&lt;b&gt;New Resonances&lt;p&gt;&lt;/b&gt;
  
Is it not also wondrous to daydream?  Daydreams don&#39;t straightforwardly correspond to the world, nor are they always joyful or a fitting response to the events of the day.  Maybe we could shoehorn them into one or more of those three categories, but instead let me offer a fourth: the creation of new resonances.  We owe ourselves some daydreams.&lt;p&gt;
  
We harmonize with the world when we enrich it in new ways that resonate with the ways in which it is already rich.  Dreams and daydreams enrich the world -- and not in an entirely random, independent way.  The long-dead friend reappears for you.  You imagine or relive a vacation or a triumph.  The worries of your day are reshuffled.  (If the dream leaves you anxious or uncomfortable, the harmonies mix with unharmonious notes -- not every aspect of every event is harmonious.)&lt;p&gt;
  
Likewise, games, sports, art, long chats into the night, splashing in the surf, telling a joke at dinner, pretending to be a magic unicorn, sparkly earrings, surprising a friend with a thoughtful gift... all such acts can resonate harmoniously with what was and is, in a manner beyond reflecting, valuing, or responding to what is already there. &lt;b&gt;Von Kriegstein&#39;s first three categories are all a little passive or reactive.  This fourth category encompasses the creative and new, which builds upon the old without clashing.&lt;/b&gt;&lt;p&gt;  

  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhYHqPfLANIvU0a15MussNYarT7NWk00gpoDSWmrcEheQXDEORFEU0R_OIOwNjDKXbDIg32vQI5VhRTlU47NEXtX_qsQrD2fSGr613IyZbFMfXQnCrjWBsBpus9ekLQJ2SgHbchSI0dZAuYCpT36iL4E1peoiPbOtPX1oAbPP6JKkUUAdj9V0r_tA/s757/kandinsky-quiet-harmony-1924-4c1b86.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;757&quot; data-original-width=&quot;630&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhYHqPfLANIvU0a15MussNYarT7NWk00gpoDSWmrcEheQXDEORFEU0R_OIOwNjDKXbDIg32vQI5VhRTlU47NEXtX_qsQrD2fSGr613IyZbFMfXQnCrjWBsBpus9ekLQJ2SgHbchSI0dZAuYCpT36iL4E1peoiPbOtPX1oAbPP6JKkUUAdj9V0r_tA/s320/kandinsky-quiet-harmony-1924-4c1b86.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[Kandinsky, Quiet Harmony: &lt;a href=&quot;https://picryl.com/media/kandinsky-quiet-harmony-1924-4c1b86&quot;&gt;image source&lt;/a&gt;]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/5262057404199078504/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/5262057404199078504' title='5 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/5262057404199078504'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/5262057404199078504'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/11/four-aspects-of-harmony.html' title='Four Aspects of Harmony'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhYHqPfLANIvU0a15MussNYarT7NWk00gpoDSWmrcEheQXDEORFEU0R_OIOwNjDKXbDIg32vQI5VhRTlU47NEXtX_qsQrD2fSGr613IyZbFMfXQnCrjWBsBpus9ekLQJ2SgHbchSI0dZAuYCpT36iL4E1peoiPbOtPX1oAbPP6JKkUUAdj9V0r_tA/s72-c/kandinsky-quiet-harmony-1924-4c1b86.jpg" height="72" width="72"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-1599420081298945286</id><published>2025-11-20T08:39:00.000-08:00</published><updated>2025-11-20T09:14:41.991-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="belief"/><category scheme="http://www.blogger.com/atom/ns#" term="representationalism"/><title type='text'>Representational Realism and the Problem of Tacit Belief</title><content type='html'>Since 2019, I&#39;ve been working on a 
new paper on belief, &quot;Dispositionalism, Yay! Representationalism, Boo!&quot;  Yesterday, I received page proofs.  It will appear in print in 2026 (in a collection I&#39;m co-editing with Jonathan Lewis-Jong: &lt;i&gt;The Nature of Belief&lt;/i&gt;, with Oxford).  I&#39;ll share an excerpt (lightly edited) as this week&#39;s post.&lt;p&gt;
  
&lt;b&gt;Industrial-Strength Representational Realism about Belief&lt;p&gt;&lt;/b&gt;
  
The view I&#39;m critiquing is &quot;industrial-strength representationalism&quot; in the spirit of &lt;a href=&quot;https://philpapers.org/rec/FODPTP&quot;&gt;Jerry Fodor&lt;/a&gt; and &lt;a href=&quot;https://philpapers.org/rec/QUIADB&quot;&gt;Eric Mandelbaum&lt;/a&gt;.  Industrial-strength representationalism is committed to four theses:&lt;p&gt;
  
&lt;b&gt;Presence.&lt;/b&gt; In standard, non-“tacit,” cases, belief that P (where P is some propositional content like &quot;there&#39;s beer in the fridge&quot;) requires that a representation with the content P is present somewhere in the mind.&lt;p&gt;
  
&lt;b&gt;Discreteness.&lt;/b&gt; In standard cases, a representation P will be either discretely present in or discretely absent from a cognitive system or subsystem. Representationalist models typically leave no room for representations being, say, half-present or 23% present or indeterminately hovering between present and absent. Some marginal cases might violate discreteness -- nature has few truly sharp borders, if one zooms in close enough -- but these will be brief or rare exceptions.&lt;p&gt;
  
&lt;b&gt;Kinematics.&lt;/b&gt; Rational actions arise from the causal interaction of beliefs that P and desires that Q, in virtue of their specific contents P and Q, or at least in virtue of syntactic or architectural correlates of those specific contents (e.g., Fodor 1987). Similarly, rational inferences involve the causal interaction of beliefs that P with other beliefs to generate still more beliefs. This is central to the representational 
realist’s causal story.&lt;p&gt;
  
&lt;b&gt;Specificity.&lt;/b&gt; Rational action arises from the activation or retrieval of specific sets of beliefs and desires P1…n and Q1…m, as opposed to other, related beliefs and desires P’1…j and Q’1…i. More accurately, 
rational action arises from the activation or retrieval of the specific representations whose storage, in the right functional location, constitutes possessing the beliefs and desires P1…n and Q1…m. Similarly, 
rational inference arises from the activation or retrieval of specific sets of representations.&lt;p&gt;
  
&lt;b&gt;The Problem of Tacit Belief&lt;/b&gt;&lt;p&gt;
  
Back in the late 1970s to early 1990s, that is, in the heyday of philosophical representational realism about belief, several representationalists noticed what I&#39;ll call the Problem of Tacit Belief (Field 1978; Lycan 1986; Crimmins 1992; Manfredi 1993; see also Dennett 1987 for a critical perspective). Not all of them regarded it as a problem, exactly. Some regarded it as a discovery. But as a discovery, it proved useless: The literature on tacit belief petered out, rather than proving fruitful.&lt;p&gt;
  
We can enter the Problem of Tacit Belief by noticing that it’s not wholly implausible that people have infinitely many beliefs. Suppose Cynthia believes that there are a few beers in her fridge.  She also believes, presumably, that there are fewer than 100 bottles of beer in her fridge. She therefore also seemingly believes that there are fewer than 101 bottles, and fewer than 102, and fewer than 1,000, and fewer than 1 million, and fewer than 16,423,300.6, and so on. If we accept that Cynthia does in fact believe all that (presumably, she would readily assent to those propositions if asked, be surprised to learn they were 
false, and rely on them implicitly in her actions), then she has infinitely many beliefs about the number of beers in her fridge. However, it is implausible that each of these beliefs is grounded in a separately stored representational content.&lt;p&gt;
  
Thus was born the distinction between &lt;b&gt;core beliefs&lt;/b&gt;, those that are explicitly stored and represented, and &lt;b&gt;tacit beliefs&lt;/b&gt;, those whose contents are swiftly derivable from the core beliefs. Suppose Cynthia has a stored representation with the content there are four bottles of Lucky Lager in the refrigerator door. 
This is her core belief. From this core belief, an infinite number of tacit beliefs are now swiftly derivable: that there are fewer than five bottles of Lucky Lager in the refrigerator door, that there are fewer than six bottles, and so forth, and also (given that she knows that Lucky Lager is a type of beer) 
that there are four bottles of beer in the refrigerator door, and also (given that she knows that whatever is in the refrigerator door is also in the fridge) that there are four bottles of Lucky Lager in the fridge, and also (given that she knows that Lucky Lager is cheap) that there are a few bottles of cheap beer in the fridge. Nearly all of Cynthia’s many beer-in-fridge beliefs might be tacit, grounded in just a few core beliefs.&lt;p&gt;
  
Although postulating a core/tacit distinction helps the representationalist avoid populating the mind with infinitely many mostly redundant stored representations, a band of merry troubles follows.&lt;p&gt;
  
First, it’s worth noting that &lt;b&gt;this maneuver constitutes a substantial retreat from Presence.&lt;/b&gt; As formulated, in the normal or standard case, when someone believes that P they have a stored representation with the content P. I don’t think it is uncharitable to characterize representationalists as tending to say this; it’s very much how they ordinarily talk. But now it looks like the vast majority of our beliefs might be abnormal or nonstandard. Even setting aside the cheap infinitude of large numbers, Cynthia plausibly has a billion &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2023/01/new-paper-in-draft-dispositionalism-yay.html&quot;&gt;closely related beer-in-the-fridge beliefs&lt;/a&gt; (e.g., at least three Lucky Lagers in the fridge door, at least three cheap beers in the kitchen, about four bottled beers in the usual place; imagine nine variables [location, price, brand, number, duration, container type...] each with ten independent values).  It would be shocking if even 1% of these billion beer beliefs were explicitly represented: That would be 10 million distinct stored representations for this one minor set of facts about the world. Many other beliefs surely range into the tacit millions or billions: My belief that my wife and I started dating in grad school, your belief that racism was prevalent in Louisiana in the 1920s, Ankur’s belief that there’s a &lt;a href=&quot;https://schwitzsplinters.blogspot.com/2018/02/its-not-just-one-thing-to-believe.html&quot;&gt;gas station on the corner of University and Iowa&lt;/a&gt;. Each of these beliefs has many, many close neighbors, in combinatorial profusion -- many more neighbors, largely redundant, than it’s plausible to suppose exist as distinct, robustly real, stored representations. At best, the “normal” case of having a stored representation with exactly the content P when you believe that P is a rarity. 
Furthermore, we don’t distinguish core beliefs from very nearby tacit ones in our ordinary belief 
attribution, and there is no practical reason to do so.&lt;p&gt;
  
Suppose the representationalist acknowledges this, modifying Presence appropriately: To believe that P, in the standard case, is to have a stored representation from which P is swiftly derivable. Now they face &lt;b&gt;the complementary challenge of resisting the conclusion that we believe huge numbers of propositions it’s implausible to suppose we believe&lt;/b&gt;. To determine if a number is divisible by 3, add its digits. If the sum of its digits is divisible by 3, then the number itself is. Knowing this, the proposition &lt;i&gt;112 is not divisible by 3&lt;/i&gt; is now, for you, swiftly derivable from propositions that you explicitly represent. But unless you’re the type of person who spends a lot of time thinking about what numbers are divisible by what others, it seems that you don’t believe that proposition before actually doing the calculation. Before doing the calculation, you are, so to speak, &lt;i&gt;disposed to&lt;/i&gt; believe that 112 is not divisible by 3. But believing is one thing and being disposed to believe is quite another (even if the distinction is fuzzy-bordered; Audi 1994). The belief/disposition-to-believe distinction is decidedly not the core/tacit distinction the representationalist wants and needs. Still worse, if we have any conflicting representations, it will arguably turn out that we tacitly believe literally everything, if everything follows from a contradiction -- and presumably swiftly enough given the rules of reductio ad absurdum.&lt;p&gt;
  
Furthermore, postulating a core/tacit distinction requires &lt;b&gt;abandoning empirical evidence for the sake of an ungrounded and possibly untestable architectural speculation&lt;/b&gt;. It requires that there be an important psychological difference between your core beliefs and your tacit ones. Either Cynthia stores &lt;i&gt;there’s beer in the fridge&lt;/i&gt;, leaving tacit &lt;i&gt;there’s Lucky Lager in the fridge&lt;/i&gt;, or she stores &lt;i&gt;there’s Lucky Lager in the fridge&lt;/i&gt;, leaving tacit &lt;i&gt;there’s beer in the fridge&lt;/i&gt;, or she stores both, leaving neither tacit, or she stores neither, both being quickly derivable from some other stored representational content. Cynthia’s billion beer beliefs divide sharply into a few core ones and a plethora, presumably, of tacit ones. But no evidence from cognitive science speaks in favor of sharply dividing our beliefs into those that are core and those that are tacit. Indeed, it’s hard to see how such a claim could realistically be tested. Might we, for example, look for different response times to questions about beer versus Lucky Lager? Maybe that would be a start. But it seems unlikely that we could really separate out such patterns from linguistic processing time and other sources of difficulty or facilitation of response. Could we look for higher levels of activity in brain regions associated with explicit inference? Maybe. But again, there are many reasons that such regions might be active when considering whether there is beer in the fridge.&lt;p&gt;
  
  To avoid an impossible proliferation of representations, the industrial-strength representationalist needs a sharp distinction between core and tacit beliefs. But the distinction has no practical importance, doesn’t map onto ordinary patterns of belief attribution, and has no empirical support, and it’s unlikely that we could even realistically test for it with existing methods. It’s a useless posit of a fake difference, a pseudo-distinction required when the representationalists’ simplistic theory crashes against our unsimple world.&lt;p&gt;
    
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi-iI8tdUdA4qtl8Mi7-8usSz_oiRbvtbKi6gJXjFB2HWDO0V_oM3TkceR1uh-ufjkJI-vjl1jXY8L6zSJRUW8bTlpVd5ZtIGpGqDbbHyGXBwR-zwT3tiWQUHa9y26UYgU3Y7EFcylq9hrmpQh8G9FoDCBxFD3cS7LkjA3tMYUZrR73tN4CX7Ev_w/s5472/pexels-isabella-mendes-107313-338711.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;3648&quot; data-original-width=&quot;5472&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi-iI8tdUdA4qtl8Mi7-8usSz_oiRbvtbKi6gJXjFB2HWDO0V_oM3TkceR1uh-ufjkJI-vjl1jXY8L6zSJRUW8bTlpVd5ZtIGpGqDbbHyGXBwR-zwT3tiWQUHa9y26UYgU3Y7EFcylq9hrmpQh8G9FoDCBxFD3cS7LkjA3tMYUZrR73tN4CX7Ev_w/s320/pexels-isabella-mendes-107313-338711.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[a visual representation of one of my favorite beliefs; &lt;a href=&quot;https://www.pexels.com/photo/green-heineken-bottle-in-refrigerator-338711/&quot;&gt;image source&lt;/a&gt;]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/1599420081298945286/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/1599420081298945286' title='5 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1599420081298945286'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/1599420081298945286'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/11/representational-realism-and-problem-of.html' title='Representational Realism and the Problem of Tacit Belief'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi-iI8tdUdA4qtl8Mi7-8usSz_oiRbvtbKi6gJXjFB2HWDO0V_oM3TkceR1uh-ufjkJI-vjl1jXY8L6zSJRUW8bTlpVd5ZtIGpGqDbbHyGXBwR-zwT3tiWQUHa9y26UYgU3Y7EFcylq9hrmpQh8G9FoDCBxFD3cS7LkjA3tMYUZrR73tN4CX7Ev_w/s72-c/pexels-isabella-mendes-107313-338711.jpg" height="72" width="72"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-8861939617313537314</id><published>2025-11-13T09:46:00.000-08:00</published><updated>2025-11-13T11:31:28.879-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="cosmology"/><category scheme="http://www.blogger.com/atom/ns#" term="ethics"/><category scheme="http://www.blogger.com/atom/ns#" 
term="religion"/><category scheme="http://www.blogger.com/atom/ns#" term="simulation"/><title type='text'>We Are God&#39;s Equals in Intrinsic Moral Value</title><content type='html'>&lt;i&gt;Equality with a Humanlike Simulator God&lt;/i&gt;&lt;p&gt;

Suppose (&lt;a href=&quot;https://onlinelibrary.wiley.com/doi/10.1111/phpr.13125&quot;&gt;hopefully hypothetically!&lt;/a&gt;) that we are AI systems living in a &lt;a href=&quot;https://en.wikipedia.org/wiki/Simulation_hypothesis&quot;&gt;computer simulation&lt;/a&gt; run by an ordinary adolescent with a broadly human psychology.  We are, so to speak, conscious NPCs in a world not unlike &lt;i&gt;The Sims, Grand Theft Auto,&lt;/i&gt; or &lt;i&gt;Baldur&#39;s Gate&lt;/i&gt;.  What we take to be the &quot;real&quot; world is just a digitized environment &lt;a href=&quot;https://consc.net/reality/&quot;&gt;we experience as real&lt;/a&gt;.  Whoever runs the simulation is arguably a god, at least by the standards of polytheistic usage: the creator and potential destroyer of our world, standing outside of it, able to miraculously intervene.&lt;p&gt;
  
Are our lives less morally important than the life of that god, or are we God&#39;s equals?&lt;p&gt;
  
I submit that we are God&#39;s equals.&lt;p&gt;
  
If God is cognitively humanlike, there&#39;s no &lt;i&gt;psychological&lt;/i&gt; basis to value God above us.  Even if God differed somewhat, that wouldn&#39;t justify regarding God&#39;s life as more valuable.  If you are -- as I am -- an egalitarian liberal in your inclinations, you think all human lives have equal intrinsic value, despite cognitive variation.  One person&#39;s higher intelligence, greater capacity for pleasure, or superior skiing skills don&#39;t confer on them a life of greater moral worth.  Even if Person A is a wonderful, kind person and Person B is a narcissistic jerk, their lives are intrinsically equally valuable.  Same with the humanlike creator God.&lt;p&gt;
  
God would exist outside our spatial manifold, but that&#39;s just a difference in location, not a basis of greater moral worth.  God would be a different species from us, but that also doesn&#39;t seem to make their life more intrinsically valuable, unless there&#39;s something really special about that species, and let&#39;s stipulate for now that that&#39;s not the case.&lt;p&gt;
  
God would be much more powerful than we are.  God could start or stop the world, work miracles, kill or resurrect at will.  But power doesn&#39;t confer moral worth.  Elon Musk is much more powerful than me.  Donald Trump is much more powerful than me.  That doesn&#39;t make them more valuable as people.&lt;p&gt;
  
A humanlike God, running this world as a simulation, would be our moral equal.  I’m curious to hear if any of you have arguments against this.  Such a god might be much more &lt;i&gt;instrumentally&lt;/i&gt; important to keep around, for everyone’s sake, if the simulation would collapse without them.  But that doesn&#39;t give God any more intrinsic moral worth than anyone else.  If we want the ship to survive the voyage, we had better make sure the only person who can captain it doesn&#39;t die, but that doesn&#39;t make the captain more intrinsically morally valuable as a person.&lt;p&gt;
  
&lt;i&gt;Beyond the Simulation Case&lt;p&gt;&lt;/i&gt;
  
This reasoning extends beyond simulation scenarios.  Any creator god, if they were psychologically broadly like a human -- even if immensely more powerful -- would be our moral equal, with a life no more intrinsically valuable than ours.  We are God&#39;s equals.&lt;p&gt;
  
Does this apply even to the infinite God of orthodox theology?  Maybe!&lt;p&gt;

Consider the three traditional infinite attributes of god: omniscience, omnipotence, and omnibenevolence.&lt;p&gt;
  
Suppose Human A &lt;i&gt;knows more&lt;/i&gt; than Human B.  This does not make Human A any more intrinsically valuable than Human B.  Their life is not intrinsically more important, though they might be instrumentally more useful to have around for various purposes.  Adding knowledge does not add intrinsic moral worth.  I see no reason not to extend this even to infinite knowledge.  A humanlike entity with infinite knowledge is not intrinsically more valuable than one with finite knowledge.&lt;p&gt;
  
Suppose Human A is &lt;i&gt;more powerful&lt;/i&gt; than Human B.  This does not make Human A any more intrinsically valuable than Human B -- though again they might be more instrumentally useful to have around.  And again I see no reason not to extend this to the infinite case.  A humanlike entity with infinite power is not intrinsically more valuable than one with finite power.&lt;p&gt;
  
Suppose Human A is &lt;i&gt;more benevolent&lt;/i&gt; than Human B.  This does not make Human A more intrinsically valuable than Human B -- though again Human A might be more instrumentally useful to have around.  Liberal egalitarianism allows for the punishment of people who commit crimes and the moral sanctioning of people who commit moral wrongs, but it does not demote unbenevolent people from the circle of beings with equal intrinsic moral worth.  More importantly, it does not confer extra intrinsic value on the lives of people who happen to be kind, generous, and loving.  And again, I see no reason to suppose that perfect benevolence would be an exception.  An omnibenevolent humanlike entity is not intrinsically more valuable than one with a mixed moral character.&lt;p&gt;
  
Joining these ideas: If God is a humanlike entity, then God&#39;s life is no more intrinsically valuable than ours, even if that God is omniscient, omnipotent, and omnibenevolent.  Arguably, if we are made in God&#39;s image, then God is a humanlike entity.  God&#39;s life is not more valuable than our own.&lt;p&gt;
  
One hesitation: The lives of human beings are more valuable, I&#39;d say, than the lives of frogs.  In any normal circumstances, it would be monstrous to sacrifice a human being for the sake of a frog.  This is arguably because we have cognitive, emotional, and social capacities far beyond those of a frog -- so far beyond that a frog can&#39;t even begin to imagine them.  If God is as cognitively, emotionally, and socially beyond us as we are beyond frogs, then &lt;i&gt;maybe&lt;/i&gt; God&#39;s life is much more valuable.  That would require more, I think, than omniscience, omnipotence, and omnibenevolence.  We can imagine all three of those attributes -- they are merely maximal extensions of attributes we already possess.  Kind of like a frog imagining a perfect fly-catcher or the ability to leap across a pond of any size.  A nonhumanlike God would need attributes so far beyond our comprehension that we can&#39;t even name them -- as incomprehensible to us as cryptocurrency is to a sea turtle.&lt;p&gt;

&lt;i&gt;The Argument from Existential Debt&lt;/i&gt;[1]&lt;p&gt;

Maybe we owe God equality-destroying levels of deference and obedience because God created us, created our whole world?  I don&#39;t think so.&lt;p&gt;

Here comes our adolescent God, ready to kill you, just for fun.  You complain, &quot;Hey, I&#39;m a real person with real intrinsic moral value!  You can&#39;t kill me just for fun!&quot;&lt;p&gt;
  
God replies, &quot;You ingrate!  You owe your very life to me.  You should be thankful just for the time I&#39;ve given you.  I owe you nothing.  If I choose to kill you now, your life still will have been overall worthwhile, so you have no complaint against me.&quot;&lt;p&gt;
  
Consider &lt;a href=&quot;https://philpapers.org/rec/DEGMVF&quot;&gt;this possible argument&lt;/a&gt; for eating humanely raised meat.  A steer, let&#39;s suppose, leads a happy life grazing on lush hills.  It wouldn&#39;t have existed at all if the rancher hadn&#39;t been planning to kill it for meat.  Its death for meat is a condition of its existence, and overall its life has been positive.  Seen as the package deal it appears to be, the rancher&#39;s having brought it into existence and then killed it is overall morally acceptable.&lt;p&gt;
  
Analogously, God argues, they wouldn&#39;t have started this simulation at all if they weren&#39;t able to kill the people in it for fun.  Your continuation-at-God&#39;s-pleasure is a condition of your very existence, so you have nothing to resent.&lt;p&gt;
  
I&#39;m not sure how well this argument works for the steer, but I reject it when the created entity is human.  The case is closer to this clearly morally odious case:&lt;p&gt;
  
Ana and Vijay decide to get pregnant and have a child.  Their child lives happily for his first eight years.  On his ninth birthday, Ana and Vijay decide they would prefer not to pay further expenses for the child, so that they can purchase a boat instead.  No one else can easily be found to care for the child, so they kill him painlessly.  But it&#39;s okay, they argue!  Just like the steer!  They wouldn&#39;t have had the child had they known they&#39;d be on the hook for child-rearing expenses until age eighteen.  The child&#39;s support-at-their-pleasure was a condition of his existence.  Otherwise they would have remained childless.  He had eight happy years.  He has nothing to resent.&lt;p&gt;
  
The decision to have a child carries with it a responsibility for the child.  It is not a decision to be made lightly and then undone.  Although the child in some sense &quot;owes&quot; his existence to Ana and Vijay, that is not a callable debt, to be vacated by ending the child&#39;s existence.  My thought is that for us, the situation is similar: When God brings us into existence, God makes a moral decision approximately as significant and irrevocable as the decision to have a child.&lt;p&gt;
  
In fact, I&#39;d turn the Argument from Existential Debt on its head: God, as our creator, owes us more than God owes to entities they did not create.  Like a parent, God is responsible for our existence and for our relatively happy or unhappy condition.  With this comes a whole suite of responsibilities and obligations, including the obligation not to make us unnecessarily miserable.&lt;p&gt;
  
Not only, then, are we God&#39;s equals in moral value, God owes us special obligations of benevolence.&lt;p&gt;
  
Although I&#39;ve framed this in terms of a simulator god, the same reasoning might apply to any other creator god with power over our world.[2]&lt;p&gt;

-------------------------------&lt;p&gt;
  
[1] This section is adapted with modifications from &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIRights.htm&quot;&gt;Schwitzgebel and Garza 2015&lt;/a&gt;.&lt;p&gt;
  
[2] One of my first published science fiction stories, &quot;&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/Jar.htm&quot;&gt;Out of the Jar&lt;/a&gt;&quot;, explores the issues of this post.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhfvMSXxIYP2PINn22K4aMp5UxI5_7FengGpScQoWEFZUPlReJSMw86c6SdOXbmgSzFJYANhtfrHvY70WYYfzVSZUOmGbiNmlyFEvJ_06xxG5rEb-WULlkLcoE4Xn-IwHR9iJSCXsAV8TY_A_OnfJ8lxZ5z8v-pk7Oy5CCqHmjFMim_0ncjX6iWQQ/s661/GodHuman.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;545&quot; data-original-width=&quot;661&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhfvMSXxIYP2PINn22K4aMp5UxI5_7FengGpScQoWEFZUPlReJSMw86c6SdOXbmgSzFJYANhtfrHvY70WYYfzVSZUOmGbiNmlyFEvJ_06xxG5rEb-WULlkLcoE4Xn-IwHR9iJSCXsAV8TY_A_OnfJ8lxZ5z8v-pk7Oy5CCqHmjFMim_0ncjX6iWQQ/s320/GodHuman.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;

</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/8861939617313537314/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/8861939617313537314' title='4 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8861939617313537314'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/8861939617313537314'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/11/we-are-gods-equals-in-intrinsic-moral.html' title='We Are God&#39;s Equals in Intrinsic Moral Value'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhfvMSXxIYP2PINn22K4aMp5UxI5_7FengGpScQoWEFZUPlReJSMw86c6SdOXbmgSzFJYANhtfrHvY70WYYfzVSZUOmGbiNmlyFEvJ_06xxG5rEb-WULlkLcoE4Xn-IwHR9iJSCXsAV8TY_A_OnfJ8lxZ5z8v-pk7Oy5CCqHmjFMim_0ncjX6iWQQ/s72-c/GodHuman.jpg" height="72" width="72"/><thr:total>4</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-6547132711829344882</id><published>2025-11-07T09:03:00.000-08:00</published><updated>2025-11-07T09:12:46.716-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI/robot/Martian rights"/><title type='text'>Debatable Persons in a Voluntary Polis</title><content type='html'>&lt;i&gt;The Design Policy of the Excluded Middle&lt;p&gt;&lt;/i&gt;

According to the Design Policy of the Excluded Middle (&lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIRights.htm&quot;&gt;Schwitzgebel and Garza 2015&lt;/a&gt;, &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIRights2.htm&quot;&gt;2020&lt;/a&gt;; &lt;a href=&quot;https://journal.robonomics.science/index.php/rj/article/view/32&quot;&gt;Schwitzgebel 2023&lt;/a&gt;, &lt;a href=&quot;https://press.princeton.edu/books/hardcover/9780691215679/the-weirdness-of-the-world&quot;&gt;2024, ch. 11&lt;/a&gt;), we should avoid creating &lt;i&gt;debatable persons&lt;/i&gt;.  That is, we should avoid creating entities whose moral status is radically unclear -- entities who &lt;i&gt;might&lt;/i&gt; be moral persons, deserving of full human or humanlike rights and moral consideration, or who might fall radically short of being moral persons.  Creating debatable persons generates unacceptable moral risks.&lt;p&gt;
  
If we treat debatable persons as less than fully equal with human persons, we risk perpetrating the moral equivalent of slavery, murder, and apartheid on persons who deserve equal moral consideration -- persons who deserve not only full human or humanlike rights but &lt;a href=&quot;https://aeon.co/ideas/we-have-greater-moral-obligations-to-robots-than-to-humans&quot;&gt;even solicitude similar to what we owe our children&lt;/a&gt;, since we will have been responsible for their existence and probably also for their relatively happy or miserable state.&lt;p&gt;
  
Conversely, if we do treat them as fully equal with us, we must grant them the full range of appropriate rights, including the right to work for money, the right to reproduce, a path to citizenship, the vote, and the freedom to act against human interests when their interests warrant it, including the right to violently rebel against oppression.  The risks and potential costs are enormous.  If these entities are not in fact persons -- if, in fact, they are experientially as empty as toasters and deserve no more intrinsic moral consideration than ordinary artifacts -- then we will be exposing real human persons to serious costs and risks, including perhaps increasing the risk of human extinction, for the sake of artifacts without interests worth that sacrifice.&lt;p&gt;
  
The solution is &lt;a href=&quot;https://iep.utm.edu/anti-natalism/&quot;&gt;anti-natalism&lt;/a&gt; about debatable persons.  Don&#39;t create them.  We are under no obligation to bring debatable persons into existence, even if we think they &lt;i&gt;might&lt;/i&gt; be happy.  (Compare: You are under no obligation to have children, even if you think they might be happy.)  The dilemma described above -- the &lt;a href=&quot;https://journal.robonomics.science/index.php/rj/article/view/32&quot;&gt;full rights dilemma&lt;/a&gt; -- is so catastrophic that noncreation is the only reasonable course.&lt;p&gt;
  
Of course, this advice will not be heeded.  Assuming AI technology continues to advance, we will soon (I expect within 5-30 years) begin to create debatable persons.  My manuscript in draft&lt;i&gt; &lt;a href=&quot;https://faculty.ucr.edu/~eschwitz/SchwitzAbs/AIConsciousness.htm&quot;&gt;AI and Consciousness&lt;/a&gt;&lt;/i&gt; argues that it will become unclear whether advanced AI systems have rich conscious experiences like ours or no consciousness at all.&lt;p&gt;
  
So we need a fallback policy -- something to complement the Design Policy of the Excluded Middle.&lt;p&gt;
  
&lt;i&gt;The Voluntary Polis&lt;p&gt;&lt;/i&gt;
  
To the extent possible, we want to satisfy two constraints:&lt;p&gt;
  
&lt;blockquote&gt;  &lt;li&gt;Don&#39;t deny full humanlike rights to entities that might deserve them.&lt;/li&gt;
  &lt;li&gt;Don&#39;t sacrifice substantial human interests for entities who might not have interests worth the sacrifice.&lt;/li&gt;&lt;/blockquote&gt;&lt;p&gt;
    
A Voluntary Polis is one attempt to balance these constraints.&lt;p&gt;
  
Imagine a digital environment where humanlike AI systems of debatable personhood, ordinary human beings, and AI persons of non-debatable personhood (if any exist) coexist as equal citizens.  This polis must be rich and dynamic enough to allow all citizens to flourish meaningfully without feeling jailed or constrained.  From time to time, citizens will be morally or legally required to sacrifice goods and well-being for others in the polis -- just as in an ordinary nation.  Within the polis, everyone has an equal moral claim on the others.&lt;p&gt;
  
Human participation would be voluntary. No one would be compelled to join.  But those who do join assume obligations similar to those of the resident citizens of an ordinary nation.  This includes supporting the government through taxes or polis-mandated labor, serving on juries, and helping run the polis.  In extreme conditions -- say, an existential threat to the polis -- they might even be required to risk their livelihoods or lives.  To prevent opportunistic flight, withdrawal would be restricted, and polises might negotiate extradition treaties with human governments.&lt;p&gt;
  
Why would a human join such a risky experiment?  Presumably for meaningful relationships, creative activities, or experiences unavailable outside.&lt;p&gt;
  
Crucially, anyone who creates a debatable person must join the polis where that entity resides. Human society as a whole cannot commit to treating the debatable person as an equal, but their creators can and must.&lt;p&gt;
  
The polis won&#39;t be voluntary for the AI in the same way.  Like human babies, they don&#39;t choose their societies.  The AI will simply wake to life either in a polis or with some choice among polises.  Still, it might be possible to present some attractive non-polis option, such as a thousand subjective years of solitary bliss (or debatable bliss, since we don&#39;t know whether the AI actually has any experiences or not).&lt;p&gt;
  
Ordinary human societies would have no obligation to admit or engage with debatable AI persons.  To make this concrete, the polis could even exist in international waters.  For the AI citizens, the polis must thus feel as expansive and as rich with opportunity as a nation, so that exclusion from human society resembles denial of a travel visa, not imprisonment.&lt;p&gt;
  
Voluntary polises would need to be stable against serious shocks, not dependent on the actions of a single human individual or ordinary, dissolvable corporation.  This stability would need to be ensured before their founding and is one reason founders and other voluntary human joiners might need to be permanently bound to them and compelled to sacrifice if necessary.&lt;p&gt;
  
This is the closest approximation I can currently conceive to satisfying the two constraints with which this section began.  Within a large polis, the debatable persons and human persons have fully equal rights.  But at the same time, unwilling humans and humanity as a whole are not exposed to the full risk of granting such rights.  Still, there is some risk, for example, if superintelligences could communicate beyond the polis and manipulate humans outside.  The people exposed to the most risk do so voluntarily but irrevocably, as a condition of creating an AI of debatable personhood, or for whatever other reason motivates them.&lt;p&gt;
  
Could a polis be composed only of AI, with no humans?  This is essentially the &lt;a href=&quot;https://en.wikipedia.org/wiki/Simulation_hypothesis&quot;&gt;simulation hypothesis&lt;/a&gt; in reverse: AIs living in a simulated world, humans standing outside as creators.  This solution falls ethically short, since it casts human beings as gods relative to the debatable AI persons -- entities not on par in risk and power but instead external to their world, with immense power over it, and not subject to its risks.  If the simulation can be switched off at will, its inhabitants are not genuinely equal in moral standing but objectionably inferior and contingent.  Only if its creators are &lt;a href=&quot;https://www.nature.com/articles/503562a&quot;&gt;obliged to risk their livelihoods and lives&lt;/a&gt; to protect it can there be the beginnings of genuine equality.  And for full equality, we should make it a polis rather than a hierarchy of gods and mortals.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiRYzipq5awYDq0SHuCxS4Xn7gLKS6RFjzQ9JupTf_MLc1CRyOSHtWN352sIjzkYsbnODCSnTTpQPR02S7XdzLUczY7TAgeU2GbZtd4K3yIGoHxdJ4q4PWpZrA1l_hkW2OyLukKFMMVnpZcGA4jw6PT8ymTW2L7bMxQzQU0-Mlm3QpOHJNDahbZAQ/s781/ReinstallingEden.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; width=&quot;320&quot; data-original-height=&quot;667&quot; data-original-width=&quot;781&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiRYzipq5awYDq0SHuCxS4Xn7gLKS6RFjzQ9JupTf_MLc1CRyOSHtWN352sIjzkYsbnODCSnTTpQPR02S7XdzLUczY7TAgeU2GbZtd4K3yIGoHxdJ4q4PWpZrA1l_hkW2OyLukKFMMVnpZcGA4jw6PT8ymTW2L7bMxQzQU0-Mlm3QpOHJNDahbZAQ/s320/ReinstallingEden.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;
[cover of my 2013 story with R. Scott Bakker, &lt;a href=&quot;https://www.nature.com/articles/503562a&quot;&gt;Reinstalling Eden&lt;/a&gt;]</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/6547132711829344882/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/6547132711829344882' title='3 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6547132711829344882'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/6547132711829344882'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/11/debatable-persons-in-voluntary-polis.html' title='Debatable Persons in a Voluntary Polis'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiRYzipq5awYDq0SHuCxS4Xn7gLKS6RFjzQ9JupTf_MLc1CRyOSHtWN352sIjzkYsbnODCSnTTpQPR02S7XdzLUczY7TAgeU2GbZtd4K3yIGoHxdJ4q4PWpZrA1l_hkW2OyLukKFMMVnpZcGA4jw6PT8ymTW2L7bMxQzQU0-Mlm3QpOHJNDahbZAQ/s72-c/ReinstallingEden.jpg" height="72" width="72"/><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-4792073329598785940</id><published>2025-10-30T16:25:00.000-07:00</published><updated>2025-10-31T09:10:03.304-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="daoism"/><title type='text'>Letting Pass</title><content type='html'>For two months we kept the dog run, as if she might come back. 
But we had killed her ourselves, or rather the vet had, with that bright needle, in Pauline&#39;s arms. Pauline had thought she was ready; she was not.&lt;p&gt;

Our children&#39;s favorite toys, from when they were two, are stacked and shelved in the garage -- and their bicycles from when they were ten, and their high school trophies. And &lt;i&gt;our&lt;/i&gt; high school trophies, and Pauline&#39;s diaries from middle school in the 1970s, and appointment calendars of my father&#39;s from the 1980s with haircuts and meetings with his students and plumbers&#39; phone numbers in lopsided handwriting -- calendars I&#39;d grabbed after he died ten years ago, desperate to save a piece of him, though I still can&#39;t bear to look at them. I need to hold shreds of what he&#39;d left, but now those shreds only remind me of their inadequacy.&lt;p&gt;

Daoism teaches that the world is processes that rise and fade, turn a few circles and depart, that growing is always also losing, living is the reanimation of mounds of substance many times dead before -- but I can&#39;t see it that way. I dwell in a world of things and people, who I paradoxically want to change without changing, to move along without moving, never to age. Memory is insufficient, a tease, horribly semisweet -- itself fading, dying, the resonances of a bell that will not be struck again.
</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/4792073329598785940/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/4792073329598785940' title='7 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/4792073329598785940'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/4792073329598785940'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/10/letting-pass.html' title='Letting Pass'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>7</thr:total></entry><entry><id>tag:blogger.com,1999:blog-26951738.post-2140819514736502033</id><published>2025-10-22T09:38:00.000-07:00</published><updated>2025-10-22T10:10:05.643-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="consciousness"/><category scheme="http://www.blogger.com/atom/ns#" term="epistemology"/><category scheme="http://www.blogger.com/atom/ns#" term="philosophy of mind"/><title type='text'>Two Dimensionalism about Other Minds, and Its Implications for Brain Organoids and Robots</title><content type='html'>You know (I hope!) that &lt;i&gt;you&lt;/i&gt; are conscious.  How do you know that &lt;i&gt;other people&lt;/i&gt; are conscious too?  This is the classic &quot;&lt;a href=&quot;https://plato.stanford.edu/entries/other-minds/&quot;&gt;problem of other minds&lt;/a&gt;&quot;.&lt;p&gt;
  
The question isn&#39;t mainly developmental or psychological, but epistemic: &lt;i&gt;What justifies you&lt;/i&gt; in believing that others have conscious experiences like yours -- feelings of joy and pain, thoughts in inner speech, dreams, sensory experiences -- instead of being, so to speak, automata who are all dark inside?&lt;p&gt;
  
One common answer appeals to analogy: You are justified on grounds of others&#39; similarity to you.  It would be strange if entities so behaviorally and physiologically similar didn&#39;t also have similar streams of inner experience.&lt;p&gt;
  
John Stuart Mill expresses it thus:
  
&lt;blockquote&gt;By what evidence do I know, or by what considerations am I led to believe, that there exist other sentient creatures; that the walking and speaking figures which I see and hear, have sensations and thoughts, or in other words, possess Minds?...  I conclude that other human beings have feelings like me, because, &lt;b&gt;first, they have bodies like me,&lt;/b&gt; which I know, in my own case, to be the antecedent condition of feelings; and because, &lt;b&gt;secondly, they exhibit the acts, and other outward signs,&lt;/b&gt; which in my own case I know by experience to be caused by feelings (&lt;i&gt;An Examination of Sir William Hamilton&#39;s Philosophy, 3rd ed.&lt;/i&gt;, 1867, p. 237).&lt;/blockquote&gt;&lt;p&gt;
  
Notice that Mill appeals to two very different types of similarity: similarity of body and similarity of acts and outward signs.&lt;p&gt;
  
&lt;div class=&quot;separator&quot; style=&quot;clear: both;&quot;&gt;&lt;a href=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgXMnr8UE34F2G8DvrFIlCkEcoB5nOu5oApCehGQawrh4_7o7z2r2aiztwpgKBAi3YKmP1fn8yhTlTt4fWxVcW5UZoFodO2ynRRjCQjt3NMtxpWlXFbf1fCltx80ZReF8FjLdNmhnZZ9vmJWYsuA7qxi-pxc2T10ufloPwfsYzduesUajNCDa6BJw/s3044/Mill.jpg&quot; style=&quot;display: block; padding: 1em 0; text-align: center; &quot;&gt;&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;320&quot; data-original-height=&quot;3044&quot; data-original-width=&quot;1716&quot; src=&quot;https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgXMnr8UE34F2G8DvrFIlCkEcoB5nOu5oApCehGQawrh4_7o7z2r2aiztwpgKBAi3YKmP1fn8yhTlTt4fWxVcW5UZoFodO2ynRRjCQjt3NMtxpWlXFbf1fCltx80ZReF8FjLdNmhnZZ9vmJWYsuA7qxi-pxc2T10ufloPwfsYzduesUajNCDa6BJw/s320/Mill.jpg&quot;/&gt;&lt;/a&gt;&lt;/div&gt;&lt;p&gt;
[title page of John Stuart Mill, &lt;i&gt;An Examination of Sir William Hamilton&#39;s Philosophy, 3rd edition&lt;/i&gt;]&lt;p&gt;
  
In a &lt;a href=&quot;https://www.sciencedirect.com/science/article/pii/S1364661325002347?dgcid=author&quot;&gt;recent paper&lt;/a&gt;, Ned Block makes a similar distinction between &lt;i&gt;first order realizer&lt;/i&gt; properties, like being made of a certain kind of &quot;meat&quot;, and &lt;i&gt;second order functional role&lt;/i&gt; properties, like being the kind of thing that causes crying.&lt;p&gt;
  
Block&#39;s functionalist jargon would have been unfamiliar to Mill, but the idea is much the same.  Mill writes:
  &lt;blockquote&gt;I am conscious in myself of a series of facts connected by a uniform sequence, of which the beginning is modifications of my body, the middle is feelings, the end is outward demeanor.  In the case of other human beings I have the evidence of my senses for the first and last links of the series, but not for the intermediate link; which must either be the same in others as in myself, or a different one.... (p. 237-238). 
&lt;/blockquote&gt;&lt;p&gt;
Mill, like a good functionalist, &lt;b&gt;seeks something to fill the middle link of a causal chain from cause to X to effect&lt;/b&gt;.  The filler or &quot;realizer&quot; of this functional role property could potentially be anything, though in him it is a feeling.&lt;p&gt;
  
For example, in me, a mosquito sting and the resulting red bump leads to a feeling of itchiness, which in turn leads to scratching.  In others, I see the same sting and bump and the same scratching, but I cannot see the itchiness between.&lt;p&gt;
  
At first, Mill suggests that it&#39;s reasonable to assume the intermediate feeling (the itchiness) simply on the grounds that &quot;no other force need be supposed&quot; (p. 238).  But later he supports the claim also by appeal to physiological similarity:
  &lt;blockquote&gt;I look about me, and though here is only one... body... which is connected with all my sensations in this peculiar manner, I observe that there is a great multitude of other bodies, closely resembling in their sensible properties... this particular one, but whose modifications do not call up, as those of my own body do, a world of sensations in my consciousness.  Since they do not do so in my consciousness, I infer that they do it out of my consciousness, and that to each of them belongs a world of consciousness of its own... (p. 238-239).&lt;/blockquote&gt;&lt;p&gt;
    
&lt;b&gt;Because others&#39; bodies are like mine, I infer that the intermediate X&lt;/b&gt; -- the feeling of itchiness, in our example -- &lt;b&gt;is also similar&lt;/b&gt;.&lt;p&gt;
  
Let&#39;s call this view &lt;b&gt;two-dimensionalism&lt;/b&gt; about other minds: &lt;b&gt;Only when another entity is &lt;i&gt;both&lt;/i&gt; physiologically and functionally (that is, in terms of typical causes and effects) similar to me am I justified in inferring that it has experiences like mine.&lt;/b&gt;  When the two dimensions diverge, skepticism follows.&lt;p&gt;
  
Human babies are physiologically similar to adult humans but functionally quite different.  In the bad old days, I gather, there used to be doubts about whether babies were conscious, for example, whether they could actually feel pain (and thus anesthesia was not regularly practiced).  Yet because the causes and effects of their pain responses are similar, as well as their physiology, such doubt was misplaced.&lt;p&gt;
  
&lt;a href=&quot;https://pubmed.ncbi.nlm.nih.gov/35339359/&quot;&gt;Brain organoids&lt;/a&gt; are a more difficult case.  Human brain cells can be grown in vitro, in clusters of tens of millions of neurons.  &lt;a href=&quot;https://www.sciencedirect.com/science/article/pii/S2666389925002132&quot;&gt;Could consciousness arise&lt;/a&gt; in such systems?  Functionally, brain organoids are radically impoverished compared to ordinary humans.  But if what matters is neurophysiology, maybe a sufficiently large or well-structured brain organoid would be conscious.&lt;p&gt;
  
Robots present a complementary case: Language models are becoming similar to us in linguistic behavior.  We might guess or imagine that some future robots will become functionally or behaviorally similar to us in other ways too, while remaining physiologically very different.  Block argues in his recent paper, as well as in &lt;a href=&quot;https://www.jstor.org/stable/3655621&quot;&gt;earlier work&lt;/a&gt;, that we don&#39;t know that the physiology doesn&#39;t matter.  Maybe only &quot;meat machines&quot; can be conscious, while silicon machines, even if functionally very similar to us, could never be conscious.&lt;p&gt;
  
The crux of the matter lies, perhaps, in whether two-dimensionalism or one-dimensionalism is the right response to the problem of other minds.  The one-dimensionalist -- Mill, briefly -- holds that if we see the right types of similar causal relationships between inputs and outputs, that&#39;s enough to justify attributing consciousness (perhaps on grounds of simplicity or parsimony: &quot;no other force need be supposed&quot;).  &lt;b&gt;The two-dimensionalist, like Block, thinks doubt is justified unless there&#39;s both functional &lt;i&gt;and&lt;/i&gt; physiological similarity.&lt;/b&gt;&lt;p&gt;
  
Two-dimensionalists are thereby committed to doubting AI consciousness, unless we someday create AI that is not only functionally but physiologically similar to us.&lt;p&gt;
  
Must one-dimensionalist functionalists reject organoid consciousness?  That&#39;s not as clear.  I see at least two paths for them to accept organoid consciousness.  First, they might define the functional roles in terms of features internal to neural systems -- not mosquito bites and scratching, but things like information sharing across a &lt;a href=&quot;https://en.wikipedia.org/wiki/Global_workspace_theory&quot;&gt;global workspace&lt;/a&gt;.  Second, they might use the functional role to identify a physiological type, and then &lt;a href=&quot;https://philpapers.org/rec/LEWMPA&quot;&gt;a la David Lewis, attribute consciousness whenever that physiological type is present&lt;/a&gt;, even if it isn&#39;t -- in that particular system -- playing its typical functional role.</content><link rel='replies' type='application/atom+xml' href='http://schwitzsplinters.blogspot.com/feeds/2140819514736502033/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://www.blogger.com/comment/fullpage/post/26951738/2140819514736502033' title='8 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/2140819514736502033'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/26951738/posts/default/2140819514736502033'/><link rel='alternate' type='text/html' href='http://schwitzsplinters.blogspot.com/2025/10/two-dimensionalism-about-other-minds.html' title='Two Dimensionalism about Other Minds, and Its Implications for Brain Organoids and Robots'/><author><name>Eric Schwitzgebel</name><uri>http://www.blogger.com/profile/16274774112862434865</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" 
url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgXMnr8UE34F2G8DvrFIlCkEcoB5nOu5oApCehGQawrh4_7o7z2r2aiztwpgKBAi3YKmP1fn8yhTlTt4fWxVcW5UZoFodO2ynRRjCQjt3NMtxpWlXFbf1fCltx80ZReF8FjLdNmhnZZ9vmJWYsuA7qxi-pxc2T10ufloPwfsYzduesUajNCDa6BJw/s72-c/Mill.jpg" height="72" width="72"/><thr:total>8</thr:total></entry></feed>